Example 1
0
static int prism2_open(struct net_device *dev)
{
	local_info_t *local = (local_info_t *) dev->priv;

	PDEBUG(DEBUG_FLOW, "%s: prism2_open\n", dev->name);

	if (local->func->dev_open && local->func->dev_open(local))
		return 1;

#ifdef NEW_MODULE_CODE
	if (!try_module_get(local->hw_module))
		return -ENODEV;
#elif MODULE
	__MOD_INC_USE_COUNT(local->hw_module);
#endif

	if (!local->dev_enabled && local->func->hw_enable(dev, 1)) {
		printk(KERN_WARNING "%s: could not enable MAC port\n",
		       dev->name);
		prism2_close(dev);
		return 1;
	}
	if (!local->dev_enabled)
		prism2_callback(local, PRISM2_CALLBACK_ENABLE);
	local->dev_enabled = 1;

	netif_device_attach(dev);
	netif_start_queue(dev);

	return 0;
}
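
The reference taken on local->hw_module must be dropped again on the close path. Below is a minimal hedged sketch of the matching release side (not the actual hostap prism2_close; the dev_close hook is assumed by symmetry with dev_open above):

static int prism2_close(struct net_device *dev)
{
	local_info_t *local = (local_info_t *) dev->priv;

	netif_stop_queue(dev);

	if (local->func->dev_close)
		local->func->dev_close(local);

	/* balance the reference taken in prism2_open() */
#ifdef NEW_MODULE_CODE
	module_put(local->hw_module);
#elif MODULE
	__MOD_DEC_USE_COUNT(local->hw_module);
#endif

	return 0;
}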
Example 2
0
static int __init init(void)
{
	/* NULL if ip_conntrack not a module */
	if (ip_conntrack_module)
		__MOD_INC_USE_COUNT(ip_conntrack_module);
	return ipt_register_match(&state_match);
}
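
A hedged sketch of the matching exit routine, assuming the usual ipt_unregister_match() teardown: the pin taken on ip_conntrack_module in init() is dropped once the match is gone.

static void __exit fini(void)
{
	ipt_unregister_match(&state_match);

	/* NULL if ip_conntrack not a module; drop the pin taken in init() */
	if (ip_conntrack_module)
		__MOD_DEC_USE_COUNT(ip_conntrack_module);
}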
Example 3
0
/* 
 * 	Must already be protected by the lock
 */
static void __ipsec_alg_usage_inc(struct ipsec_alg *ixt) {
#ifdef MODULE
	if (ixt->ixt_module) {
		__MOD_INC_USE_COUNT(ixt->ixt_module);
	}
#endif
	atomic_inc(&ixt->ixt_refcnt);
}
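
The likely counterpart, sketched from nothing but the fields used above (hypothetical; the real decrement helper may differ):

/* 
 * 	Must already be protected by the lock
 */
static void __ipsec_alg_usage_dec(struct ipsec_alg *ixt) {
	atomic_dec(&ixt->ixt_refcnt);
#ifdef MODULE
	if (ixt->ixt_module) {
		__MOD_DEC_USE_COUNT(ixt->ixt_module);
	}
#endif
}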
Example 4
0
int mcp_register(struct mcp *mcp)
{
	if (mcp_if)
		return -EBUSY;
	if (mcp->owner)
		__MOD_INC_USE_COUNT(mcp->owner);
	mcp_if = mcp;
	return 0;
}
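
Registration pins mcp->owner, so unregistration must unpin it. A hedged sketch of the counterpart (hypothetical, not necessarily the real mcp_unregister):

void mcp_unregister(struct mcp *mcp)
{
	if (mcp_if != mcp)
		return;		/* not the registered interface */
	mcp_if = NULL;
	if (mcp->owner)
		__MOD_DEC_USE_COUNT(mcp->owner);
}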
Example 5
0
static int init_or_cleanup(int init)
{
	int ret = 0;

	if (!init) goto cleanup;

	ret = ip_nat_rule_init();
	if (ret < 0) {
		printk("ip_nat_init: can't setup rules.\n");
		goto cleanup_nothing;
	}
	ret = ip_nat_init();
	if (ret < 0) {
		printk("ip_nat_init: can't setup rules.\n");
		goto cleanup_rule_init;
	}
	ret = nf_register_hook(&ip_nat_in_ops);
	if (ret < 0) {
		printk("ip_nat_init: can't register in hook.\n");
		goto cleanup_nat;
	}
	ret = nf_register_hook(&ip_nat_out_ops);
	if (ret < 0) {
		printk("ip_nat_init: can't register out hook.\n");
		goto cleanup_inops;
	}
	ret = nf_register_hook(&ip_nat_local_out_ops);
	if (ret < 0) {
		printk("ip_nat_init: can't register local out hook.\n");
		goto cleanup_outops;
	}
	if (ip_conntrack_module)
		__MOD_INC_USE_COUNT(ip_conntrack_module);
	return ret;

 cleanup:
	if (ip_conntrack_module)
		__MOD_DEC_USE_COUNT(ip_conntrack_module);
	nf_unregister_hook(&ip_nat_local_out_ops);
 cleanup_outops:
	nf_unregister_hook(&ip_nat_out_ops);
 cleanup_inops:
	nf_unregister_hook(&ip_nat_in_ops);
 cleanup_nat:
	ip_nat_cleanup();
 cleanup_rule_init:
	ip_nat_rule_cleanup();
 cleanup_nothing:
	MUST_BE_READ_WRITE_UNLOCKED(&ip_nat_lock);
	return ret;
}
Example 6
0
static int sound_open(struct inode *inode, struct file *file)
{
	int dev = MINOR(inode->i_rdev);
	int retval;

	DEB(printk("sound_open(dev=%d)\n", dev));
	if ((dev >= SND_NDEVS) || (dev < 0)) {
		printk(KERN_ERR "Invalid minor device %d\n", dev);
		return -ENXIO;
	}
	switch (dev & 0x0f) {
	case SND_DEV_CTL:
		dev >>= 4;
		if (dev >= 0 && dev < MAX_MIXER_DEV && mixer_devs[dev] == NULL) {
			char modname[20];
			sprintf(modname, "mixer%d", dev);
			request_module(modname);
		}
		if (dev && (dev >= num_mixers || mixer_devs[dev] == NULL))
			return -ENXIO;

		if (mixer_devs[dev]->owner)
			__MOD_INC_USE_COUNT (mixer_devs[dev]->owner);
		break;

	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		if ((retval = sequencer_open(dev, file)) < 0)
			return retval;
		break;

	case SND_DEV_MIDIN:
		if ((retval = MIDIbuf_open(dev, file)) < 0)
			return retval;
		break;

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		if ((retval = audio_open(dev, file)) < 0)
			return retval;
		break;

	default:
		printk(KERN_ERR "Invalid minor device %d\n", dev);
		return -ENXIO;
	}

	return 0;
}
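
Only the SND_DEV_CTL case pins a module, so only that case needs an explicit unpin on release; the other minors release through their own close routines. A hedged sketch of the corresponding release path (sequencer_release, MIDIbuf_release, and audio_release are assumed by symmetry with the open calls above):

static int sound_release(struct inode *inode, struct file *file)
{
	int dev = MINOR(inode->i_rdev);

	switch (dev & 0x0f) {
	case SND_DEV_CTL:
		dev >>= 4;
		/* drop the reference taken in sound_open() */
		if (mixer_devs[dev]->owner)
			__MOD_DEC_USE_COUNT (mixer_devs[dev]->owner);
		break;

	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		sequencer_release(dev, file);
		break;

	case SND_DEV_MIDIN:
		MIDIbuf_release(dev, file);
		break;

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		audio_release(dev, file);
		break;
	}

	return 0;
}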
Example 7
0
/*****************************************************************************
 * Driver tty interface functions
 *****************************************************************************/
static int serial_open (struct tty_struct *tty, struct file * filp)
{
	struct usb_serial *serial;
	struct usb_serial_port *port;
	unsigned int portNumber;
	int retval = 0;
	
	dbg("%s", __FUNCTION__);

	/* initialize the pointer in case something fails */
	tty->driver_data = NULL;

	/* get the serial object associated with this tty pointer */
	serial = get_serial_by_minor (MINOR(tty->device));

	if (serial_paranoia_check (serial, __FUNCTION__))
		return -ENODEV;

	/* set up our port structure, making the tty driver remember our port object, and use it */
	portNumber = MINOR(tty->device) - serial->minor;
	port = &serial->port[portNumber];
	tty->driver_data = port;

	down (&port->sem);
	port->tty = tty;
	 
	/* lock this module before we call it */
	if (serial->type->owner)
		__MOD_INC_USE_COUNT(serial->type->owner);

	++port->open_count;
	if (port->open_count == 1) {
		/* only call the device specific open if this 
		 * is the first time the port is opened */
		if (serial->type->open)
			retval = serial->type->open(port, filp);
		else
			retval = generic_open(port, filp);
	}

	if (retval) {
		port->open_count = 0;
		if (serial->type->owner)
			__MOD_DEC_USE_COUNT(serial->type->owner);
	}

	up (&port->sem);
	return retval;
}
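
Every successful open takes exactly one owner reference, so every close must drop exactly one. A hedged sketch of the matching serial_close (the port->serial back-pointer and generic_close fallback are assumptions made by symmetry with the open path above):

static void serial_close(struct tty_struct *tty, struct file *filp)
{
	struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
	struct usb_serial *serial = port->serial;	/* assumed back-pointer */

	down (&port->sem);

	if (port->open_count && --port->open_count == 0) {
		/* only call the device specific close on the last close */
		if (serial->type->close)
			serial->type->close(port, filp);
		else
			generic_close(port, filp);
	}

	/* drop the reference taken in serial_open() */
	if (serial->type->owner)
		__MOD_DEC_USE_COUNT(serial->type->owner);

	up (&port->sem);
}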
Example 8
0
struct inode * proc_get_inode(struct super_block * sb, int ino,
				struct proc_dir_entry * de)
{
	struct inode * inode;

	/*
	 * Increment the use count so the dir entry can't disappear.
	 */
	de_get(de);
#if 1
/* shouldn't ever happen */
if (de && de->deleted)
printk("proc_iget: using deleted entry %s, count=%d\n", de->name, atomic_read(&de->count));
#endif

	inode = iget(sb, ino);
	if (!inode)
		goto out_fail;
	
	inode->u.generic_ip = (void *) de;
	if (de) {
		if (de->mode) {
			inode->i_mode = de->mode;
			inode->i_uid = de->uid;
			inode->i_gid = de->gid;
		}
		if (de->size)
			inode->i_size = de->size;
		if (de->nlink)
			inode->i_nlink = de->nlink;
		if (de->owner)
			__MOD_INC_USE_COUNT(de->owner);
		if (S_ISBLK(de->mode)||S_ISCHR(de->mode)||S_ISFIFO(de->mode))
			init_special_inode(inode,de->mode,kdev_t_to_nr(de->rdev));
		else {
			if (de->proc_iops)
				inode->i_op = de->proc_iops;
			if (de->proc_fops)
				inode->i_fop = de->proc_fops;
		}
	}

out:
	return inode;

out_fail:
	de_put(de);
	goto out;
}			
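
The de_get() and owner pins taken here are released when the inode is destroyed. A hedged sketch of the counterpart, close to but not claimed to be the real proc_delete_inode:

static void proc_delete_inode(struct inode *inode)
{
	struct proc_dir_entry *de = (struct proc_dir_entry *) inode->u.generic_ip;

	if (de) {
		/* balance proc_get_inode(): unpin the owner, then the entry */
		if (de->owner)
			__MOD_DEC_USE_COUNT(de->owner);
		de_put(de);
	}
}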
Example 9
0
long
asmlinkage sys_nfsservctl(int cmd, void *argp, void *resp)
{
	int ret = -ENOSYS;
	
#if defined(CONFIG_MODULES)
	lock_kernel();

	if (nfsd_linkage ||
	    (request_module ("nfsd") == 0 && nfsd_linkage)) {
		__MOD_INC_USE_COUNT(nfsd_linkage->owner);
		unlock_kernel();
		ret = nfsd_linkage->do_nfsservctl(cmd, argp, resp);
		__MOD_DEC_USE_COUNT(nfsd_linkage->owner);
	} else
		unlock_kernel();
#endif
	return ret;
}
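
The inc/dec pair brackets exactly one call, and the increment happens under the big kernel lock so the module cannot be unloaded between the check and the pin. With the later failable API the same bracket would read roughly as follows (a sketch using try_module_get()/module_put(), with the request_module() retry omitted for brevity; not the kernel's actual code):

long
asmlinkage sys_nfsservctl(int cmd, void *argp, void *resp)
{
	int ret = -ENOSYS;

#if defined(CONFIG_MODULES)
	lock_kernel();

	if (nfsd_linkage && try_module_get(nfsd_linkage->owner)) {
		unlock_kernel();
		ret = nfsd_linkage->do_nfsservctl(cmd, argp, resp);
		module_put(nfsd_linkage->owner);	/* unpin after the call */
	} else
		unlock_kernel();
#endif
	return ret;
}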
Example 10
0
/*****************************************************************************
 * Driver tty interface functions
 *****************************************************************************/
static int serial_open (struct tty_struct *tty, struct file * filp)
{
	struct usb_serial *serial;
	struct usb_serial_port *port;
	unsigned int portNumber;
	int retval;
	
	dbg(__FUNCTION__);

	/* initialize the pointer in case something fails */
	tty->driver_data = NULL;

	/* get the serial object associated with this tty pointer */
	serial = get_serial_by_minor (minor(tty->device));

	if (serial_paranoia_check (serial, __FUNCTION__)) {
		return -ENODEV;
	}

	/* set up our port structure, making the tty driver remember our port object, and use it */
	portNumber = minor(tty->device) - serial->minor;
	port = &serial->port[portNumber];
	tty->driver_data = port;
	port->tty = tty;
	 
	/* pass on to the driver specific version of this function if it is available */
	if (serial->type->open) {
		if (serial->type->owner)
			__MOD_INC_USE_COUNT(serial->type->owner);
		retval = serial->type->open(port, filp);
		if (retval)
			__MOD_DEC_USE_COUNT(serial->type->owner);
	} else {
		retval = generic_open(port, filp);
	}

	return retval;
}
Example 11
0
static int sg_open(struct inode * inode, struct file * filp)
{
    int dev=MINOR(inode->i_rdev);
    int flags=filp->f_flags;
    if (dev>=sg_template.dev_max || !scsi_generics[dev].device)
        return -ENXIO;

    if( !scsi_block_when_processing_errors(scsi_generics[dev].device) )
    {
        return -ENXIO;
    }

    if (O_RDWR!=(flags & O_ACCMODE))
        return -EACCES;

    /*
     * If we want exclusive access, then wait until the device is not
     * busy, and then set the flag to prevent anyone else from using it.
     */
    if (flags & O_EXCL)
    {
        while(scsi_generics[dev].users)
        {
            if (flags & O_NONBLOCK)
                return -EBUSY;
            interruptible_sleep_on(&scsi_generics[dev].generic_wait);
            if (signal_pending(current))
                return -ERESTARTSYS;
        }
        scsi_generics[dev].exclude=1;
    }
    else
        /*
         * Wait until nobody has an exclusive open on
         * this device.
         */
        while(scsi_generics[dev].exclude)
        {
            if (flags & O_NONBLOCK)
                return -EBUSY;
            interruptible_sleep_on(&scsi_generics[dev].generic_wait);
            if (signal_pending(current))
                return -ERESTARTSYS;
        }

    /*
     * OK, we should have grabbed the device.  Mark the thing so
     * that other processes know that we have it, and initialize the
     * state variables to known values.
     */
    if (!scsi_generics[dev].users
            && scsi_generics[dev].pending
            && scsi_generics[dev].complete)
    {
        if (scsi_generics[dev].buff != NULL)
            sg_free(scsi_generics[dev].buff,scsi_generics[dev].buff_len);
        scsi_generics[dev].buff=NULL;
        scsi_generics[dev].pending=0;
    }
    if (!scsi_generics[dev].users)
        scsi_generics[dev].timeout=SG_DEFAULT_TIMEOUT;
    if (scsi_generics[dev].device->host->hostt->module)
        __MOD_INC_USE_COUNT(scsi_generics[dev].device->host->hostt->module);
    if (sg_template.module)
        __MOD_INC_USE_COUNT(sg_template.module);
    scsi_generics[dev].users++;
    return 0;
}
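
sg_open takes up to two references (the host template's module and sg itself), so the release path has to drop both. A minimal hedged sketch built only from the fields used above:

static void sg_close(struct inode *inode, struct file *filp)
{
    int dev=MINOR(inode->i_rdev);

    scsi_generics[dev].users--;
    scsi_generics[dev].exclude=0;
    wake_up(&scsi_generics[dev].generic_wait);

    /* drop the references taken in sg_open() */
    if (scsi_generics[dev].device->host->hostt->module)
        __MOD_DEC_USE_COUNT(scsi_generics[dev].device->host->hostt->module);
    if (sg_template.module)
        __MOD_DEC_USE_COUNT(sg_template.module);
}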
Example 12
0
/* Detect all SSAs attached to the machine.
   To be fast, do it on all online FC channels at the same time. */
__initfunc(int pluto_detect(Scsi_Host_Template *tpnt))
{
	int i, retry, nplutos;
	fc_channel *fc;
	Scsi_Device dev;

	tpnt->proc_dir = &proc_scsi_pluto;
	fcscount = 0;
	for_each_online_fc_channel(fc)
		fcscount++;
	PLND(("%d channels online\n", fcscount))
	if (!fcscount) {
#if defined(MODULE) && defined(CONFIG_FC4_SOC_MODULE) && defined(CONFIG_KMOD)
		request_module("soc");
		
		for_each_online_fc_channel(fc)
			fcscount++;
		if (!fcscount)
#endif
			return 0;
	}
	fcs = (struct ctrl_inquiry *) scsi_init_malloc (sizeof (struct ctrl_inquiry) * fcscount, GFP_DMA);
	if (!fcs) {
		printk ("PLUTO: Not enough memory to probe\n");
		return 0;
	}
	
	memset (fcs, 0, sizeof (struct ctrl_inquiry) * fcscount);
	memset (&dev, 0, sizeof(dev));
	atomic_set (&fcss, fcscount);
	fc_timer.function = pluto_detect_timeout;
	
	i = 0;
	for_each_online_fc_channel(fc) {
		Scsi_Cmnd *SCpnt;
		struct Scsi_Host *host;
		struct pluto *pluto;
		
		if (i == fcscount) break;
		
		PLD(("trying to find SSA\n"))

		/* If this is already registered to some other SCSI host, then it cannot be pluto */
		if (fc->scsi_name[0]) continue;
		memcpy (fc->scsi_name, "SSA", 4);
		
		fcs[i].fc = fc;
		
		fc->can_queue = PLUTO_CAN_QUEUE;
		fc->rsp_size = 64;
		fc->encode_addr = pluto_encode_addr;
		
		fc->fcp_register(fc, TYPE_SCSI_FCP, 0);
	
		SCpnt = &(fcs[i].cmd);
		host = &(fcs[i].host);
		pluto = (struct pluto *)host->hostdata;
		
		pluto->fc = fc;
	
		SCpnt->host = host;
		SCpnt->cmnd[0] = INQUIRY;
		SCpnt->cmnd[4] = 255;
		
		/* FC layer requires this, so that SCpnt->device->tagged_supported is initially 0 */
		SCpnt->device = &dev;
		
		SCpnt->cmd_len = COMMAND_SIZE(INQUIRY);
	
		SCpnt->request.rq_status = RQ_SCSI_BUSY;
		
		SCpnt->done = pluto_detect_done;
		SCpnt->bufflen = 256;
		SCpnt->buffer = fcs[i].inquiry;
		SCpnt->request_bufflen = 256;
		SCpnt->request_buffer = fcs[i].inquiry;
		PLD(("set up %d %08lx\n", i, (long)SCpnt))
		i++;
	}
	
	for (retry = 0; retry < 5; retry++) {
		for (i = 0; i < fcscount; i++) {
			if (!fcs[i].fc) break;
			if (fcs[i].cmd.request.rq_status != RQ_SCSI_DONE) {
				disable_irq(fcs[i].fc->irq);
				PLND(("queuecommand %d %d\n", retry, i))
				fcp_scsi_queuecommand (&(fcs[i].cmd), 
					pluto_detect_scsi_done);
				enable_irq(fcs[i].fc->irq);
			}
		}
	    
		fc_timer.expires = jiffies + 10 * HZ;
		add_timer(&fc_timer);
		
		down(&fc_sem);
		PLND(("Woken up\n"))
		if (!atomic_read(&fcss))
			break; /* All fc channels have answered us */
	}
	del_timer(&fc_timer);

	PLND(("Finished search\n"))
	for (i = 0, nplutos = 0; i < fcscount; i++) {
		Scsi_Cmnd *SCpnt;
		
		if (!(fc = fcs[i].fc)) break;
	
		SCpnt = &(fcs[i].cmd);
		
		/* Let FC mid-level free allocated resources */
		SCpnt->done (SCpnt);
		
		if (!SCpnt->result) {
			struct pluto_inquiry *inq;
			struct pluto *pluto;
			struct Scsi_Host *host;
			
			inq = (struct pluto_inquiry *)fcs[i].inquiry;

			if ((inq->dtype & 0x1f) == TYPE_PROCESSOR &&
			    !strncmp (inq->vendor_id, "SUN", 3) &&
			    !strncmp (inq->product_id, "SSA", 3)) {
				char *p;
				long *ages;
				
				ages = kmalloc (((inq->channels + 1) * inq->targets) * sizeof(long), GFP_KERNEL);
				if (!ages) continue;
				
				host = scsi_register (tpnt, sizeof (struct pluto));
				if (!host) panic ("Cannot register PLUTO host\n");
				
				nplutos++;
				
				if (fc->module) __MOD_INC_USE_COUNT(fc->module);
				
				pluto = (struct pluto *)host->hostdata;
				
				host->max_id = inq->targets;
				host->max_channel = inq->channels;
				host->irq = fc->irq;
				
				host->select_queue_depths = pluto_select_queue_depths;
				
				fc->channels = inq->channels + 1;
				fc->targets = inq->targets;
				fc->ages = ages;
				memset (ages, 0, ((inq->channels + 1) * inq->targets) * sizeof(long));
				
				pluto->fc = fc;
				memcpy (pluto->rev_str, inq->revision, 4);
				pluto->rev_str[4] = 0;
				p = strchr (pluto->rev_str, ' ');
				if (p) *p = 0;
				memcpy (pluto->fw_rev_str, inq->fw_revision, 4);
				pluto->fw_rev_str[4] = 0;
				p = strchr (pluto->fw_rev_str, ' ');
				if (p) *p = 0;
				memcpy (pluto->serial_str, inq->serial, 12);
				pluto->serial_str[12] = 0;
				p = strchr (pluto->serial_str, ' ');
				if (p) *p = 0;
				
				PLD(("Found SSA rev %s fw rev %s serial %s %dx%d\n", pluto->rev_str, pluto->fw_rev_str, pluto->serial_str, host->max_channel, host->max_id))
			} else
				fc->fcp_register(fc, TYPE_SCSI_FCP, 1);
		} else
			fc->fcp_register(fc, TYPE_SCSI_FCP, 1);
	}
	/* (tail reconstructed; the excerpt breaks off above) free the
	 * probe buffers and report what was found */
	scsi_init_free((char *)fcs, sizeof(struct ctrl_inquiry) * fcscount);
	if (nplutos)
		printk("PLUTO: %d SparcSTORAGE Arrays found\n", nplutos);
	return nplutos;
}
Example 13
0
static inline int
do_load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file * file;
	struct dentry *interpreter_dentry = NULL; /* to shut gcc up */
 	unsigned long load_addr = 0, load_bias;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	mm_segment_t old_fs;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, k, elf_brk;
	int elf_exec_fileno;
	int retval, size, i;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, end_data;
	struct elfhdr elf_ex;
	struct elfhdr interp_elf_ex;
  	struct exec interp_ex;
	char passed_fileno[6];

	/* Get the exec-header */
	elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (elf_ex.e_ident[0] != 0x7f ||
	    strncmp(&elf_ex.e_ident[1], "ELF", 3) != 0)
		goto out;

	if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex.e_machine))
		goto out;
#ifdef __mips__

/* allow only mips1 if the executable is a MIPSEB ELF,
	because IRIX binaries are handled elsewhere. */

/* borrowed from binutils/include/elf/common.h*/
#define EI_DATA         5               /* Data encoding */
#define ELFDATA2MSB     2               /* 2's complement, big endian */

	if ((elf_ex.e_ident[EI_DATA] == ELFDATA2MSB ) &&
		(elf_ex.e_flags & EF_MIPS_ARCH) ) {
			retval = -ENOEXEC;
			goto out;
	}
#endif
	if (!bprm->dentry->d_inode->i_op		   ||
	    !bprm->dentry->d_inode->i_op->default_file_ops ||
	    !bprm->dentry->d_inode->i_op->default_file_ops->mmap)
		goto out;

	/* Now read in all of the header information */

	retval = -ENOMEM;
	size = elf_ex.e_phentsize * elf_ex.e_phnum;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = read_exec(bprm->dentry, elf_ex.e_phoff,
				(char *) elf_phdata, size, 1);
	if (retval < 0)
		goto out_free_ph;

	retval = open_dentry(bprm->dentry, O_RDONLY);
	if (retval < 0)
		goto out_free_ph;
	elf_exec_fileno = retval;
	file = fget(elf_exec_fileno);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	end_data = 0;

	for (i = 0; i < elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			retval = -EINVAL;
		  	if (elf_interpreter)
				goto out_free_interp;

			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */

			retval = -ENOMEM;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = read_exec(bprm->dentry, elf_ppnt->p_offset,
					   elf_interpreter,
					   elf_ppnt->p_filesz, 1);
			if (retval < 0)
				goto out_free_interp;
			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;
#if 0
			printk("Using ELF interpreter %s\n", elf_interpreter);
#endif
			old_fs = get_fs(); /* This could probably be optimized */
			set_fs(get_ds());
#ifdef __sparc__
			if (ibcs2_interpreter) {
				unsigned long old_pers = current->personality;
					
				current->personality = PER_SVR4;
				interpreter_dentry = open_namei(elf_interpreter,
								0, 0);
				current->personality = old_pers;
			} else
#endif					
				interpreter_dentry = open_namei(elf_interpreter,
								0, 0);
			set_fs(old_fs);
			retval = PTR_ERR(interpreter_dentry);
			if (IS_ERR(interpreter_dentry))
				goto out_free_interp;
			retval = permission(interpreter_dentry->d_inode, MAY_EXEC);
			if (retval < 0)
				goto out_free_dentry;
			retval = read_exec(interpreter_dentry, 0, bprm->buf, 128, 1);
			if (retval < 0)
				goto out_free_dentry;

			/* Get the exec headers */
			interp_ex = *((struct exec *) bprm->buf);
			interp_elf_ex = *((struct elfhdr *) bprm->buf);
		}
		elf_ppnt++;
	}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(interp_ex) != OMAGIC) &&
		    (N_MAGIC(interp_ex) != ZMAGIC) &&
		    (N_MAGIC(interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (interp_elf_ex.e_ident[0] != 0x7f ||
		    strncmp(&interp_elf_ex.e_ident[1], "ELF", 3) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if (!bprm->sh_bang) {
		char * passed_p;

		if (interpreter_type == INTERPRETER_AOUT) {
		  sprintf(passed_fileno, "%d", elf_exec_fileno);
		  passed_p = passed_fileno;

		  if (elf_interpreter) {
		    bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p,2);
		    bprm->argc++;
		  }
		}
		retval = -E2BIG;
		if (!bprm->p)
			goto out_free_dentry;
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* OK, This is the point of no return */
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	elf_entry = (unsigned long) elf_ex.e_entry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(elf_ex, ibcs2_interpreter);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->rss = 0;
	bprm->p = setup_arg_pages(bprm->p, bprm);
	current->mm->start_stack = bprm->p;

	/* Try and get dynamic programs out of the way of the default mmap
	   base, as well as whatever program they might try to exec.  This
	   is because the brk will follow the loader, and is not movable.  */

	load_bias = ELF_PAGESTART(elf_ex.e_type==ET_DYN ? ELF_ET_DYN_BASE : 0);

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */

	old_fs = get_fs();
	set_fs(get_ds());
	for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		}

		error = do_mmap(file, ELF_PAGESTART(load_bias + vaddr),
		                (elf_ppnt->p_filesz +
		                ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
		                elf_prot, elf_flags, (elf_ppnt->p_offset -
		                ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += error;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code <  k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
	set_fs(old_fs);
	fput(file); /* all done with the file */

	elf_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	end_data += load_bias;

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&interp_ex,
						     interpreter_dentry);
		else
			elf_entry = load_elf_interp(&interp_elf_ex,
						    interpreter_dentry,
						    &interp_load_addr);

		dput(interpreter_dentry);
		kfree(elf_interpreter);

		if (elf_entry == ~0UL) {
			printk(KERN_ERR "Unable to load interpreter\n");
			kfree(elf_phdata);
			send_sig(SIGSEGV, current, 0);
			return 0;
		}
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	if (current->exec_domain && current->exec_domain->module)
		__MOD_DEC_USE_COUNT(current->exec_domain->module);
	if (current->binfmt && current->binfmt->module)
		__MOD_DEC_USE_COUNT(current->binfmt->module);
	current->exec_domain = lookup_exec_domain(current->personality);
	current->binfmt = &elf_format;
	if (current->exec_domain && current->exec_domain->module)
		__MOD_INC_USE_COUNT(current->exec_domain->module);
	if (current->binfmt && current->binfmt->module)
		__MOD_INC_USE_COUNT(current->binfmt->module);

#ifndef VM_STACK_FLAGS
	current->executable = dget(bprm->dentry);
#endif
	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	bprm->p = (unsigned long)
	  create_elf_tables((char *)bprm->p,
			bprm->argc,
			bprm->envc,
			(interpreter_type == INTERPRETER_ELF ? &elf_ex : NULL),
			load_addr, load_bias,
			interp_load_addr,
			(interpreter_type == INTERPRETER_AOUT ? 0 : 1));
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->start_brk = current->mm->brk = elf_brk;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections
	 */
	set_brk(elf_bss, elf_brk);

	padzero(elf_bss);

#if 0
	printk("(start_brk) %x\n" , current->mm->start_brk);
	printk("(end_code) %x\n" , current->mm->end_code);
	printk("(start_code) %x\n" , current->mm->start_code);
	printk("(end_data) %x\n" , current->mm->end_data);
	printk("(start_stack) %x\n" , current->mm->start_stack);
	printk("(brk) %x\n" , current->mm->brk);
#endif

	if ( current->personality == PER_SVR4 )
	{
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		/* N.B. Shouldn't the size here be PAGE_SIZE?? */
		error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  This macro performs whatever initialization of
	 * the regs structure is required.
	 */
	ELF_PLAT_INIT(regs);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (current->flags & PF_PTRACED)
		send_sig(SIGTRAP, current, 0);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	dput(interpreter_dentry);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	fput(file);
	sys_close(elf_exec_fileno);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
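
The put-old/pin-new dance on exec_domain and binfmt above is common enough that later kernels factor the binfmt half into a helper. A sketch of that shape; note it pins the new module before dropping the old one, so replacing a binfmt with itself never lets the use count touch zero:

static inline void set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new && new->module)
		__MOD_INC_USE_COUNT(new->module);
	current->binfmt = new;
	if (old && old->module)
		__MOD_DEC_USE_COUNT(old->module);
}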
Example 14
0
asmlinkage int exe$creprc(unsigned int *pidadr, void *image, void *input, void *output, void *error, struct _generic_64 *prvadr, unsigned int *quota, void*prcnam, unsigned int baspri, unsigned int uic, unsigned short int mbxunt, unsigned int stsflg,...) {
  unsigned long stack_here;
  struct _pcb * p, * cur;
  int retval;

  struct dsc$descriptor * imd = image, * ind = input, * oud = output, * erd = error;

  unsigned long clone_flags=CLONE_VFORK;
  //check pidadr

  ctl$gl_creprc_flags = stsflg;
  // check for PRC$M_NOUAF sometime

  if (stsflg&PRC$M_DETACH) {

  }
  if (uic) {

  }
  //setipl(IPL$_ASTDEL);//postpone this?
  cur=ctl$gl_pcb;
  vmslock(&SPIN_SCHED, IPL$_SCHED);
  vmslock(&SPIN_MMG, IPL$_MMG);
  p = alloc_task_struct();
  //bzero(p,sizeof(struct _pcb));//not wise?
  memset(p,0,sizeof(struct _pcb));

  // check more
  // compensate for no struct clone/copy
  p->sigmask_lock = SPIN_LOCK_UNLOCKED;
  p->alloc_lock = SPIN_LOCK_UNLOCKED;

  qhead_init(&p->pcb$l_astqfl);
  // and enable ast del to all modes

  p->pcb$b_type = DYN$C_PCB;

  p->pcb$b_asten=15;
  p->phd$b_astlvl=4;
  p->pr_astlvl=4;
  p->psl=0;
  p->pslindex=0;

  qhead_init(&p->pcb$l_lockqfl);
  // set capabilities
  p->pcb$l_permanent_capability = sch$gl_default_process_cap;
  p->pcb$l_capability = p->pcb$l_permanent_capability;
  // set affinity
  // set default fileprot
  // set arb
  // set mbx stuff
  // from setprn:
  if (prcnam) {
    struct dsc$descriptor *s=prcnam;
    strncpy(p->pcb$t_lname,s->dsc$a_pointer,s->dsc$w_length);
  }
  // set priv
  p->pcb$l_priv=ctl$gl_pcb->pcb$l_priv;
  // set pris
  p->pcb$b_prib=31-baspri;
  p->pcb$b_pri=31-baspri-6;
  //	if (p->pcb$b_pri<16) p->pcb$b_pri=16;
  p->pcb$w_quant=-QUANTUM;
  
  // set uic
  p->pcb$l_uic=ctl$gl_pcb->pcb$l_uic;
  // set vms pid
  // check process name
  // do something with pqb

  p->pcb$l_pqb=kmalloc(sizeof(struct _pqb),GFP_KERNEL);
  memset(p->pcb$l_pqb,0,sizeof(struct _pqb));

  struct _pqb * pqb = p->pcb$l_pqb;

  pqb->pqb$q_prvmsk = ctl$gq_procpriv;

  if (imd)
    memcpy(pqb->pqb$t_image,imd->dsc$a_pointer,imd->dsc$w_length);
  if (ind)
    memcpy(pqb->pqb$t_input,ind->dsc$a_pointer,ind->dsc$w_length);
  if (oud)
    memcpy(pqb->pqb$t_output,oud->dsc$a_pointer,oud->dsc$w_length);
  if (erd)
    memcpy(pqb->pqb$t_error,erd->dsc$a_pointer,erd->dsc$w_length);

  if (oud) // temp measure
    memcpy(p->pcb$t_terminal,oud->dsc$a_pointer,oud->dsc$w_length);

  // translate some logicals
  // copy security clearance
  // copy msg
  // copy flags
  // set jib
  // do quotas
  // process itmlst
  // set pcb$l_pqb
#if 0
  setipl(IPL$_MMG);
  vmslock(&SPIN_SCHED,-1);
  // find vacant slot in pcb vector
  // and store it
#endif  
  // make ipid and epid
  p->pcb$l_pid=alloc_ipid();
  {
    unsigned long *vec=sch$gl_pcbvec;
    vec[p->pcb$l_pid&0xffff]=p;
  }
  p->pcb$l_epid=exe$ipid_to_epid(p->pcb$l_pid);
  // should invoke sch$chse, put this at bottom?
  // setipl(0) and return

  // now lots of things from fork

	retval = -EAGAIN;
	/*
	 * Check if we are over our maximum process limit, but be sure to
	 * exclude root. This is needed to make it possible for login and
	 * friends to set the per-user process limit to something lower
	 * than the amount of processes root is running. -- Rik
	 */
#if 0
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur
	              && !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
		goto bad_fork_free;

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
#endif

	/*
	 * Counter increases are protected by
	 * the kernel lock so nr_threads can't
	 * increase under us (but it may decrease).
	 */

	get_exec_domain(p->exec_domain);

	if (p->binfmt && p->binfmt->module)
		__MOD_INC_USE_COUNT(p->binfmt->module);

	p->did_exec = 0;
	p->swappable = 0;
	p->state = TASK_UNINTERRUPTIBLE;

	//copy_flags(clone_flags, p);
	// not here?	p->pcb$l_pid = alloc_ipid();

	p->run_list.next = NULL;
	p->run_list.prev = NULL;

	p->p_cptr = NULL;
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	p->sigpending = 0;
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->leader = 0;		/* session leadership doesn't inherit */
	p->tty_old_pgrp = 0;
	p->times.tms_utime = p->times.tms_stime = 0;
	p->times.tms_cutime = p->times.tms_cstime = 0;
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = jiffies;

	INIT_LIST_HEAD(&p->local_pages);

	p->files = current->files;
	p->fs = current->fs;
	p->sig = current->sig;

	/* copy all the process information */
	if (copy_files(clone_flags, p))
		goto bad_fork_cleanup;
	if (copy_fs(clone_flags, p))
		goto bad_fork_cleanup_files;
	if (copy_sighand(clone_flags, p))
		goto bad_fork_cleanup_fs;

 bad_fork_cleanup:
 bad_fork_cleanup_files:
 bad_fork_cleanup_fs:

	// now a hole

	// now more from fork

	/* ok, now we should be set up.. */
	p->swappable = 1;
	p->exit_signal = 0;
	p->pdeath_signal = 0;

	/*
	 * "share" dynamic priority between parent and child, thus the
	 * total amount of dynamic priority in the system doesn't change,
	 * giving more scheduling fairness. This only matters in the first
	 * timeslice; in the long run the scheduling behaviour is unchanged.
	 */

	/*
	 * Ok, add it to the run-queues and make it
	 * visible to the rest of the system.
	 *
	 * Let it rip!
	 */
	retval = p->pcb$l_epid;
	INIT_LIST_HEAD(&p->thread_group);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT and CLONE_THREAD re-use the old parent */
	p->p_opptr = current->p_opptr;
	p->p_pptr = current->p_pptr;

        p->p_opptr = current /*->p_opptr*/;
        p->p_pptr = current /*->p_pptr*/;

	SET_LINKS(p);

	nr_threads++;
	write_unlock_irq(&tasklist_lock);

	//	printk("fork befwak\n");
	//wake_up_process(p);		/* do this last */
	//	wake_up_process2(p,PRI$_TICOM);		/* do this last */
	//goto fork_out;//??


	// now something from exec

	// wait, better do execve itself

	memcpy(p->rlim, current->rlim, sizeof(p->rlim));

	qhead_init(&p->pcb$l_sqfl);

	struct mm_struct * mm = mm_alloc();
	p->mm = mm;
	p->active_mm = mm;

	p->user = INIT_USER;

	spin_lock(&mmlist_lock);
#if 0
	list_add(&mm->mmlist, &p->p_pptr->mm->mmlist);
#endif
	mmlist_nr++;
	spin_unlock(&mmlist_lock);

	// Now we are getting into the area that is really the swappers

	// To be moved to shell.c and swp$shelinit later

	p->pcb$l_phd=kmalloc(sizeof(struct _phd),GFP_KERNEL);
	init_phd(p->pcb$l_phd);

	init_fork_p1pp(p,p->pcb$l_phd,ctl$gl_pcb,ctl$gl_pcb->pcb$l_phd);
#ifdef __x86_64__
	shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x2000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x2000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ffa0000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ffa0000-0x2000,0x7fffe000);
#else
	shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x2000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x2000,0x7fffe000);
#endif
	int exe$procstrt(struct _pcb * p);
	struct pt_regs * regs = &pidadr;
	//printk("newthread %x\n",p),
	retval = new_thread(0, clone_flags, 0, 0, p, 0);

	int eip=0,esp=0;

	//	start_thread(regs,eip,esp);

	sch$chse(p, PRI$_TICOM);

	vmsunlock(&SPIN_MMG,-1);
	vmsunlock(&SPIN_SCHED,0);

	return SS$_NORMAL;

#if 0
	return sys_execve(((struct dsc$descriptor *)image)->dsc$a_pointer,0,0);

	return SS$_NORMAL;
#endif

#if 0
{
  char * filename=((struct dsc$descriptor *)image)->dsc$a_pointer;
  char ** argv=0;
  char ** envp=0;
  struct pt_regs * regs=0;
  struct linux_binprm bprm;
  struct file *file;
  int retval;
  int i;

	file = open_exec(filename);

	retval = PTR_ERR(file);
	if (IS_ERR(file))
		return retval;

	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
	memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0])); 

	bprm.file = file;
	bprm.filename = filename;
	bprm.sh_bang = 0;
	bprm.loader = 0;
	bprm.exec = 0;
	if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
		allow_write_access(file);
		fput(file);
		//printk("here 7 %x\n",bprm.argc);
		return bprm.argc;
	}

	if ((bprm.envc = count(envp, bprm.p / sizeof(void *))) < 0) {
		allow_write_access(file);
		fput(file);
		//printk("here 6\n");
		return bprm.envc;
	}

	retval = prepare_binprm(&bprm);
	//printk("here 4\n");
	if (retval < 0) 
		goto out; 

	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
	//printk("here 3\n");
	if (retval < 0) 
		goto out; 

	bprm.exec = bprm.p;
	retval = copy_strings(bprm.envc, envp, &bprm);
	//printk("here 2\n");
	if (retval < 0) 
		goto out; 

	retval = copy_strings(bprm.argc, argv, &bprm);
	//printk("here 1\n");
	if (retval < 0) 
		goto out; 

	retval = search_binary_handler(&bprm,regs);
	if (retval >= 0)
		/* execve success */
		return retval;

out:
	/* Something went wrong, return the inode and free the argument pages*/
	allow_write_access(bprm.file);
	if (bprm.file)
		fput(bprm.file);

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm.page[i];
		if (page)
			__free_page(page);
	}

	return retval;
}
#endif

fork_out:
	return retval;

bad_fork_free:
	free_task_struct(p);
	goto fork_out;

}
Example 15
0
int __init rtcap_init(void)
{
    struct rtnet_device *rtdev;
    struct net_device   *dev;
    int                 ret;
    int                 devices = 0;
    int                 i;
    unsigned long       flags;


    printk("RTcap: real-time capturing interface\n");

#if defined(CONFIG_RTAI_24) || defined(CONFIG_RTAI_30) || defined(CONFIG_RTAI_31)
    if (start_timer) {
        rt_set_oneshot_mode();
        start_rt_timer(0);
    }
#endif

    rtskb_queue_init(&cap_queue);

    ret = rtos_nrt_signal_init(&cap_signal, rtcap_signal_handler);
    if (ret < 0)
        goto error1;

    for (i = 0; i < MAX_RT_DEVICES; i++) {
        tap_device[i].present = 0;

        rtdev = rtdev_get_by_index(i);
        if (rtdev != NULL) {
            down(&rtdev->nrt_sem);

            if (test_bit(PRIV_FLAG_UP, &rtdev->priv_flags)) {
                up(&rtdev->nrt_sem);
                printk("RTcap: %s busy, skipping device!\n", rtdev->name);
                rtdev_dereference(rtdev);
                continue;
            }

            if (rtdev->mac_priv != NULL) {
                up(&rtdev->nrt_sem);

                printk("RTcap: RTmac discipline already active on device %s. "
                       "Load RTcap before RTmac!\n", rtdev->name);

                rtdev_dereference(rtdev);
                continue;
            }

            memset(&tap_device[i].tap_dev_stats, 0,
                   sizeof(struct net_device_stats));

            dev = &tap_device[i].tap_dev;
            memset(dev, 0, sizeof(struct net_device));
            dev->init = tap_dev_init;
            dev->priv = rtdev;
            strncpy(dev->name, rtdev->name, IFNAMSIZ-1);
            dev->name[IFNAMSIZ-1] = 0;

            ret = register_netdev(dev);
            if (ret < 0) {
                up(&rtdev->nrt_sem);
                rtdev_dereference(rtdev);

                printk("RTcap: unable to register %s!\n", dev->name);
                goto error2;
            }
            tap_device[i].present = TAP_DEV;

            tap_device[i].orig_xmit = rtdev->hard_start_xmit;

            if ((rtdev->flags & IFF_LOOPBACK) == 0) {
                dev = &tap_device[i].rtmac_tap_dev;
                memset(dev, 0, sizeof(struct net_device));
                dev->init = tap_dev_init;
                dev->priv = rtdev;
                strncpy(dev->name, rtdev->name, IFNAMSIZ-1);
                dev->name[IFNAMSIZ-1] = 0;
                strncat(dev->name, "-mac", IFNAMSIZ-strlen(dev->name));

                ret = register_netdev(dev);
                if (ret < 0) {
                    up(&rtdev->nrt_sem);
                    rtdev_dereference(rtdev);

                    printk("RTcap: unable to register %s!\n", dev->name);
                    goto error2;
                }
                tap_device[i].present |= RTMAC_TAP_DEV;

                rtdev->hard_start_xmit = rtcap_xmit_hook;
            } else
                rtdev->hard_start_xmit = rtcap_loopback_xmit_hook;

            /* If the device requires no xmit_lock, start_xmit equals
             * hard_start_xmit, so we have to update that pointer as well
             */
            if (rtdev->features & RTNETIF_F_NON_EXCLUSIVE_XMIT)
                rtdev->start_xmit = rtdev->hard_start_xmit;
                
            tap_device[i].present |= XMIT_HOOK;
            __MOD_INC_USE_COUNT(rtdev->owner);

            up(&rtdev->nrt_sem);

            devices++;
        }
    }

    if (devices == 0) {
        printk("RTcap: no real-time devices found!\n");
        ret = -ENODEV;
        goto error2;
    }

    if (rtskb_pool_init(&cap_pool, rtcap_rtskbs * devices) <
            rtcap_rtskbs * devices) {
        rtskb_pool_release(&cap_pool);
        ret = -ENOMEM;
        goto error2;
    }

    /* register capturing handlers with RTnet core */
    rtos_spin_lock_irqsave(&rtcap_lock, flags);
    rtcap_handler = rtcap_rx_hook;
    rtos_spin_unlock_irqrestore(&rtcap_lock, flags);

    return 0;

  error2:
    cleanup_tap_devices();
    rtos_nrt_signal_delete(&cap_signal);

  error1:
#if defined(CONFIG_RTAI_24) || defined(CONFIG_RTAI_30) || defined(CONFIG_RTAI_31)
    if (start_timer)
        stop_rt_timer();
#endif

    return ret;
}
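
Teardown must undo everything rtcap_init() recorded per device: restore the transmit hooks, drop the owner pin, and unregister the tap devices. A hedged sketch of cleanup_tap_devices(), reconstructed only from the state set up above:

static void cleanup_tap_devices(void)
{
    struct rtnet_device *rtdev;
    int                 i;

    for (i = 0; i < MAX_RT_DEVICES; i++) {
        if ((tap_device[i].present & TAP_DEV) == 0)
            continue;

        if (tap_device[i].present & XMIT_HOOK) {
            rtdev = (struct rtnet_device *)tap_device[i].tap_dev.priv;

            down(&rtdev->nrt_sem);
            /* restore the original transmit hook and unpin the owner */
            rtdev->hard_start_xmit = tap_device[i].orig_xmit;
            if (rtdev->features & RTNETIF_F_NON_EXCLUSIVE_XMIT)
                rtdev->start_xmit = rtdev->hard_start_xmit;
            __MOD_DEC_USE_COUNT(rtdev->owner);
            up(&rtdev->nrt_sem);

            rtdev_dereference(rtdev);
        }

        if (tap_device[i].present & RTMAC_TAP_DEV)
            unregister_netdev(&tap_device[i].rtmac_tap_dev);
        unregister_netdev(&tap_device[i].tap_dev);

        tap_device[i].present = 0;
    }
}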
Example 16
0
/* 
 * 	Must already be protected by the lock
 */
static void __ipsec_alg_usage_inc(struct ipsec_alg *ixt) {
	if (ixt->ixt_module)
		__MOD_INC_USE_COUNT(ixt->ixt_module);
	atomic_inc(&ixt->ixt_refcnt);
}
Example 17
0
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags).  The actual kick-off is left to the caller.
 */
struct task_struct *copy_process(unsigned long clone_flags,
			         unsigned long stack_start,
			         struct pt_regs *regs,
			         unsigned long stack_size,
			         int *parent_tidptr,
			         int *child_tidptr)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);
	if ((clone_flags & CLONE_DETACHED) && !(clone_flags & CLONE_THREAD))
		return ERR_PTR(-EINVAL);
	if (!(clone_flags & CLONE_DETACHED) && (clone_flags & CLONE_THREAD))
		return ERR_PTR(-EINVAL);

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	p->tux_info = NULL;

	retval = -EAGAIN;

	/*
	 * Increment user->__count before the rlimit test so that it would
	 * be correct if we take the bad_fork_free failure path.
	 */
	atomic_inc(&p->user->__count);
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
			goto bad_fork_free;
	}

	atomic_inc(&p->user->processes);

	/*
	 * Counter increases are protected by
	 * the kernel lock so nr_threads can't
	 * increase under us (but it may decrease).
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;
	
	get_exec_domain(p->exec_domain);

	if (p->binfmt && p->binfmt->module)
		__MOD_INC_USE_COUNT(p->binfmt->module);

	p->did_exec = 0;
	p->swappable = 0;
	p->state = TASK_UNINTERRUPTIBLE;

	copy_flags(clone_flags, p);
	if (clone_flags & CLONE_IDLETASK)
		p->pid = 0;
	else {
		p->pid = alloc_pidmap();
		if (p->pid == -1)
			goto bad_fork_cleanup;
	}
	
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	INIT_LIST_HEAD(&p->run_list);

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->switch_lock);

	p->sigpending = 0;
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->leader = 0;		/* session leadership doesn't inherit */
	p->tty_old_pgrp = 0;
	memset(&p->utime, 0, sizeof(p->utime));
	memset(&p->stime, 0, sizeof(p->stime));
	memset(&p->cutime, 0, sizeof(p->cutime));
	memset(&p->cstime, 0, sizeof(p->cstime));
	memset(&p->group_utime, 0, sizeof(p->group_utime));
	memset(&p->group_stime, 0, sizeof(p->group_stime));
	memset(&p->group_cutime, 0, sizeof(p->group_cutime));
	memset(&p->group_cstime, 0, sizeof(p->group_cstime));

#ifdef CONFIG_SMP
	memset(&p->per_cpu_utime, 0, sizeof(p->per_cpu_utime));
	memset(&p->per_cpu_stime, 0, sizeof(p->per_cpu_stime));
#endif

	memset(&p->timing_state, 0, sizeof(p->timing_state));
	p->timing_state.type = PROCESS_TIMING_USER;
	p->last_sigxcpu = 0;
	p->array = NULL;
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = jiffies;

	retval = -ENOMEM;
	/* copy all the process information */
	if (copy_files(clone_flags, p))
		goto bad_fork_cleanup;
	if (copy_fs(clone_flags, p))
		goto bad_fork_cleanup_files;
	if (copy_sighand(clone_flags, p))
		goto bad_fork_cleanup_fs;
	if (copy_signal(clone_flags, p))
		goto bad_fork_cleanup_sighand;
	if (copy_mm(clone_flags, p))
		goto bad_fork_cleanup_signal;
	if (copy_namespace(clone_flags, p))
		goto bad_fork_cleanup_mm;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;
	p->semundo = NULL;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID)
		? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID)
		? child_tidptr : NULL;

	/* Our parent execution domain becomes the current domain.
	   These must match for thread signalling to apply */
	   
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->swappable = 1;
	if (clone_flags & CLONE_DETACHED)
		p->exit_signal = -1;
	else
		p->exit_signal = clone_flags & CSIGNAL;
	p->pdeath_signal = 0;

	/*
	 * Share the timeslice between parent and child, thus the
	 * total amount of pending timeslices in the system doesn't change,
	 * resulting in more scheduling fairness.
	 */
	local_irq_disable();
	p->time_slice = (current->time_slice + 1) >> 1;
	p->first_time_slice = 1;
	/*
	 * The remainder of the first timeslice might be recovered by
	 * the parent if the child exits early enough.
	 */
	current->time_slice >>= 1;
	p->last_run = jiffies;
	if (!current->time_slice) {
		/*
		 * This case is rare, it happens when the parent has only
		 * a single jiffy left from its timeslice. Taking the
		 * runqueue lock is not a problem.
		 */
		current->time_slice = 1;
		scheduler_tick(0 /* don't update the time stats */);
	}
	local_irq_enable();

	if ((int)current->time_slice <= 0)
		BUG();
	if ((int)p->time_slice <= 0)
		BUG();

	/*
	 * Ok, add it to the run-queues and make it
	 * visible to the rest of the system.
	 *
	 * Let it rip!
	 */
	p->tgid = p->pid;
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);
	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill. (or normal SIGKILL.)
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->group_exit) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			retval = -EINTR;
			goto bad_fork_cleanup_namespace;
		}
		p->tgid = current->tgid;
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			p->sigpending = 1;
		}

		spin_unlock(&current->sighand->siglock);
	}

	SET_LINKS(p);
	if (p->ptrace & PT_PTRACED)
		__ptrace_link(p, current->parent);

	attach_pid(p, PIDTYPE_PID, p->pid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_TGID, p->tgid);
		attach_pid(p, PIDTYPE_PGID, p->pgrp);
		attach_pid(p, PIDTYPE_SID, p->session);
	} else {
		link_pid(p, p->pids + PIDTYPE_TGID,
			&p->group_leader->pids[PIDTYPE_TGID].pid);
	}

	/* clear controlling tty of new task if parent's was just cleared */
	if (!current->tty && p->tty)
		p->tty = NULL;

	nr_threads++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_mm:
	exit_mm(p);
	if (p->active_mm)
		mmdrop(p->active_mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup:
	if (p->pid > 0)
		free_pidmap(p->pid);
	put_exec_domain(p->exec_domain);
	if (p->binfmt && p->binfmt->module)
		__MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count:
	atomic_dec(&p->user->processes);
bad_fork_free:
	p->state = TASK_ZOMBIE; /* debug */
	atomic_dec(&p->usage);
	put_task_struct(p);
	goto fork_out;
}