Example #1
/*
 * the scsi_ioctl() function differs from most ioctls in that it does
 * not take a major/minor number as the dev field.  Rather, it takes
 * a pointer to a scsi_devices[] element, a structure. 
 */
int scsi_ioctl(Scsi_Device * dev, int cmd, void *arg)
{
	int result;
	char scsi_cmd[MAX_COMMAND_SIZE];
	char cmd_byte1;
	Scsi_Device_Internal_State devState;

	/* No idea how this happens.... */
	if (!dev)
		return -ENXIO;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device.  Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	if (!scsi_block_when_processing_errors(dev)) {
		return -ENODEV;
	}
	cmd_byte1 = (dev->scsi_level <= SCSI_2) ? (dev->lun << 5) : 0;

	switch (cmd) {
	case SCSI_IOCTL_GET_IDLUN:
		if (verify_area(VERIFY_WRITE, arg, sizeof(Scsi_Idlun)))
			return -EFAULT;

		__put_user((dev->id & 0xff)
			 + ((dev->lun & 0xff) << 8)
			 + ((dev->channel & 0xff) << 16)
			 + ((dev->host->host_no & 0xff) << 24),
			 &((Scsi_Idlun *) arg)->dev_id);
		__put_user(dev->host->unique_id, &((Scsi_Idlun *) arg)->host_unique_id);
		return 0;
	case SCSI_IOCTL_GET_BUS_NUMBER:
		return put_user(dev->host->host_no, (int *) arg);
	case SCSI_IOCTL_TAGGED_ENABLE:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		if (!dev->tagged_supported)
			return -EINVAL;
		dev->tagged_queue = 1;
		dev->current_tag = 1;
		return 0;
	case SCSI_IOCTL_TAGGED_DISABLE:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		if (!dev->tagged_supported)
			return -EINVAL;
		dev->tagged_queue = 0;
		dev->current_tag = 0;
		return 0;
	case SCSI_IOCTL_PROBE_HOST:
		return ioctl_probe(dev->host, arg);
	case SCSI_IOCTL_SEND_COMMAND:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return scsi_ioctl_send_command((Scsi_Device *) dev,
					     (Scsi_Ioctl_Command *) arg);
	case SCSI_IOCTL_DOORLOCK:
		if (!dev->removable || !dev->lockable)
			return 0;
		scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
		scsi_cmd[1] = cmd_byte1;
		scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
		scsi_cmd[4] = SCSI_REMOVAL_PREVENT;
		return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
				   IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
	case SCSI_IOCTL_DOORUNLOCK:
		if (!dev->removable || !dev->lockable)
			return 0;
		scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
		scsi_cmd[1] = cmd_byte1;
		scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
		scsi_cmd[4] = SCSI_REMOVAL_ALLOW;
		return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
				   IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
	case SCSI_IOCTL_TEST_UNIT_READY:
		scsi_cmd[0] = TEST_UNIT_READY;
		scsi_cmd[1] = cmd_byte1;
		scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
		scsi_cmd[4] = 0;
		return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
				   IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
	case SCSI_IOCTL_START_UNIT:
		scsi_cmd[0] = START_STOP;
		scsi_cmd[1] = cmd_byte1;
		scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
		scsi_cmd[4] = 1;
		return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
				     START_STOP_TIMEOUT, NORMAL_RETRIES);
	case SCSI_IOCTL_STOP_UNIT:
		scsi_cmd[0] = START_STOP;
		scsi_cmd[1] = cmd_byte1;
		scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
		scsi_cmd[4] = 0;
		return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
				     START_STOP_TIMEOUT, NORMAL_RETRIES);
	case SCSI_IOCTL_GET_PCI:
		return scsi_ioctl_get_pci(dev, arg);
	case SCSI_IOCTL_GET_DEVICE_INTERNAL_STATE:

		result = verify_area(VERIFY_WRITE, arg, sizeof (Scsi_Device_Internal_State));
		if (result) return result;
		memset(&devState, 0, sizeof(Scsi_Device_Internal_State));
		copy_internal_device_state( &devState, dev, 0);
		if (copy_to_user(arg, &devState,sizeof(Scsi_Device_Internal_State)))
			return -EFAULT;
		return 0;

	case SCSI_IOCTL_SET_DEVICE_INTERNAL_STATE:
#if defined(CONFIG_SCSI_DEBUG)
		if(!capable(CAP_SYS_ADMIN))  return -EACCES;
		result = verify_area(VERIFY_READ, arg, sizeof (Scsi_Device_Internal_State));
		if (result) return result;
		memset(&devState, 0, sizeof(Scsi_Device_Internal_State));
		if (copy_from_user(&devState, arg, sizeof(Scsi_Device_Internal_State)))
			return -EFAULT;
		copy_internal_device_state( &devState, dev, 1);
		return 0;
#else
		return -EFAULT;
#endif

	default:
		if (dev->host->hostt->ioctl)
			return dev->host->hostt->ioctl(dev, cmd, arg);
		return -EINVAL;
	}
	return -EINVAL;
}
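
For reference, the caller's side of SCSI_IOCTL_GET_IDLUN is an ordinary ioctl(2), and the bit layout of dev_id follows directly from the __put_user() arithmetic above. A minimal userspace sketch; the ioctl value (0x5382) and the /dev/sda path are assumptions, not taken from this example:

/*
 * Userspace sketch: decode SCSI_IOCTL_GET_IDLUN. The dev_id layout
 * mirrors the packing done in scsi_ioctl() above.
 */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef SCSI_IOCTL_GET_IDLUN
#define SCSI_IOCTL_GET_IDLUN 0x5382	/* assumption: kernel scsi_ioctl value */
#endif

struct scsi_idlun {
	unsigned int dev_id;	/* id | (lun<<8) | (channel<<16) | (host<<24) */
	unsigned int host_unique_id;
};

int main(void)
{
	struct scsi_idlun idlun;
	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);	/* placeholder */

	if (fd < 0)
		return 1;
	if (ioctl(fd, SCSI_IOCTL_GET_IDLUN, &idlun) < 0) {
		perror("SCSI_IOCTL_GET_IDLUN");
		return 1;
	}
	printf("host=%u channel=%u id=%u lun=%u\n",
	       (idlun.dev_id >> 24) & 0xff,
	       (idlun.dev_id >> 16) & 0xff,
	       idlun.dev_id & 0xff,
	       (idlun.dev_id >> 8) & 0xff);
	close(fd);
	return 0;
}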
Example #2
static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned short dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t phys_ofs, alloclen;
	int ret;
	D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
	ret = inode_change_ok(inode, iattr);
	if (ret) 
		return ret;

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		dev = old_encode_dev(inode->i_rdev);
		mdata = (char *)&dev;
		mdatalen = sizeof(dev);
		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
	} else if (S_ISLNK(inode->i_mode)) {
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata)
			return -ENOMEM;
		ret = jffs2_read_dnode(c, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			kfree(mdata);
			return ret;
		}
		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}
		
	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode & S_IFMT))
			 kfree(mdata);
		return ret;
	}
	down(&f->sem);
	ivalid = iattr->ia_valid;
	
	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);

	if (ivalid & ATTR_MODE)
		if (iattr->ia_mode & S_ISGID &&
		    !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
			ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
		else 
			ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);


	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);
	
	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		up(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	inode->i_uid = je16_to_cpu(ri->uid);
	inode->i_gid = je16_to_cpu(ri->gid);


	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		vmtruncate(inode, iattr->ia_size);
		jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size);
	}

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	up(&f->sem);
	jffs2_complete_reservation(c);

	return 0;
}
Example #3
static int open_mem(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
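
The entire access policy of this open handler is the one capability check: without CAP_SYS_RAWIO, open(2) fails with EPERM. A minimal userspace probe of that behavior, assuming only the standard /dev/mem node:

/* Userspace sketch: probe whether we may open /dev/mem. Run as an
 * unprivileged user to observe the -EPERM from open_mem(). */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		/* EPERM here is open_mem() rejecting us for lack of CAP_SYS_RAWIO */
		printf("open(/dev/mem): %s\n", strerror(errno));
		return 1;
	}
	close(fd);
	printf("opened /dev/mem: CAP_SYS_RAWIO is available\n");
	return 0;
}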
Example #4
File: rtc.c Project: 7LK/McWRT
static ssize_t rtc_write(UNUSED struct file *filp, const char *buf,
                         size_t count, loff_t *ppos)
{
	struct rtc_time rtc_tm;
	char buffer[23];
	char *p;

	if (!capable(CAP_SYS_TIME))
		return -EACCES;

	if (ppos != &filp->f_pos)
		return -ESPIPE;

	/*
	 * For simplicity, the only acceptable format is:
	 * YYYY:MM:DD:W:HH:MM:SS\n
	 */

	if (count != 22)
		goto err_out;

	if (copy_from_user(buffer, buf, count))
		return -EFAULT;

	buffer[sizeof(buffer)-1] = '\0';

	p = &buffer[0];

	rtc_tm.tm_year  = simple_strtoul(p, &p, 10);
	if (*p++ != ':') goto err_out;

	rtc_tm.tm_mon = simple_strtoul(p, &p, 10) - 1;
	if (*p++ != ':') goto err_out;

	rtc_tm.tm_mday = simple_strtoul(p, &p, 10);
	if (*p++ != ':') goto err_out;

	rtc_tm.tm_wday = simple_strtoul(p, &p, 10);
	if (*p++ != ':') goto err_out;

	rtc_tm.tm_hour = simple_strtoul(p, &p, 10);
	if (*p++ != ':') goto err_out;

	rtc_tm.tm_min = simple_strtoul(p, &p, 10);
	if (*p++ != ':') goto err_out;

	rtc_tm.tm_sec = simple_strtoul(p, &p, 10);
	if (*p != '\n') goto err_out;

	rtc_tm.tm_year -= RTC_EPOCH;

	set_rtc_time(&rtc_tm);

	*ppos += count;

	return count;

 err_out:
	printk(KERN_ERR "invalid format: use YYYY:MM:DD:W:HH:MM:SS\\n\n");
	return -EINVAL;
}
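
Because the handler accepts exactly one 22-byte format and nothing else, the caller has to produce the string precisely. A hedged userspace sketch; the /dev/rtc path and the date values are assumptions:

/*
 * Userspace sketch: set the clock through rtc_write() using the
 * only accepted format, "YYYY:MM:DD:W:HH:MM:SS\n" (22 bytes).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	int fd, len;

	/* 15 digits + 6 colons + '\n' = 22 bytes, as the driver requires */
	len = snprintf(buf, sizeof(buf), "%04d:%02d:%02d:%d:%02d:%02d:%02d\n",
		       2024, 1, 15, 1, 12, 30, 0);
	if (len != 22)
		return 1;

	fd = open("/dev/rtc", O_WRONLY);	/* placeholder node */
	if (fd < 0 || write(fd, buf, 22) != 22) {
		perror("rtc write");	/* EACCES without CAP_SYS_TIME */
		return 1;
	}
	close(fd);
	return 0;
}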
Example #5
static int
ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ipip6_fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipip6_tunnel_locate(&p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		t = ipip6_tunnel_locate(&p, cmd == SIOCADDTUNNEL);

		if (dev != ipip6_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = netdev_priv(dev);
				ipip6_tunnel_unlink(t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipip6_tunnel_link(t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ipip6_fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipip6_tunnel_locate(&p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ipip6_fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		err = unregister_netdevice(dev);
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
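
The header checks above (version 4, ihl 5, protocol IPPROTO_IPV6) describe exactly what a configuration tool must hand in through the ifreq data pointer. A minimal sketch of the userspace side of SIOCADDTUNNEL; the addresses, the "sit1" name, and the "sit0" control device are placeholders:

/*
 * Userspace sketch: create a sit (IPv6-in-IPv4) tunnel with
 * SIOCADDTUNNEL, satisfying the iph checks in ipip6_tunnel_ioctl().
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/if.h>
#include <linux/sockios.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>

int main(void)
{
	struct ip_tunnel_parm p;
	struct ifreq ifr;
	int fd;

	memset(&p, 0, sizeof(p));
	strncpy(p.name, "sit1", IFNAMSIZ - 1);
	p.iph.version  = 4;			/* required: IPv4 outer header */
	p.iph.ihl      = 5;			/* required: no IP options */
	p.iph.protocol = IPPROTO_IPV6;		/* required: 6in4 payload */
	p.iph.ttl      = 64;			/* nonzero TTL implies DF */
	p.iph.saddr    = inet_addr("192.0.2.1");
	p.iph.daddr    = inet_addr("192.0.2.2");

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "sit0", IFNAMSIZ - 1);	/* fallback device */
	ifr.ifr_data = (void *)&p;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCADDTUNNEL, &ifr) < 0) {
		perror("SIOCADDTUNNEL");	/* EPERM without CAP_NET_ADMIN */
		return 1;
	}
	close(fd);
	return 0;
}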
Example #6
static int pep_setsockopt(struct sock *sk, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct pep_sock *pn = pep_sk(sk);
	int val = 0, err = 0;

	if (level != SOL_PNPIPE)
		return -ENOPROTOOPT;
	if (optlen >= sizeof(int)) {
		if (get_user(val, (int __user *) optval))
			return -EFAULT;
	}

	lock_sock(sk);
	switch (optname) {
	case PNPIPE_ENCAP:
		if (val && val != PNPIPE_ENCAP_IP) {
			err = -EINVAL;
			break;
		}
		if (!pn->ifindex == !val)
			break; /* Nothing to do! */
		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (val) {
			release_sock(sk);
			err = gprs_attach(sk);
			if (err > 0) {
				pn->ifindex = err;
				err = 0;
			}
		} else {
			pn->ifindex = 0;
			release_sock(sk);
			gprs_detach(sk);
			err = 0;
		}
		goto out_norel;

	case PNPIPE_HANDLE:
		if ((sk->sk_state == TCP_CLOSE) &&
			(val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
			pn->pipe_handle = val;
		else
			err = -EINVAL;
		break;

	case PNPIPE_INITSTATE:
		pn->init_enable = !!val;
		break;

	default:
		err = -ENOPROTOOPT;
	}
	release_sock(sk);

out_norel:
	return err;
}
Example #7
static int ax8netfilter_do_ip_setsockopt(struct sock *sk, int level,
			    int optname, char __user *optval, int optlen)
{
	struct inet_sock *inet = inet_sk(sk);
	int val = 0, err;

	if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
			     (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
			     (1<<IP_RETOPTS) | (1<<IP_TOS) |
			     (1<<IP_TTL) | (1<<IP_HDRINCL) |
			     (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
			     (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
			     (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
	    optname == IP_MULTICAST_TTL ||
	    optname == IP_MULTICAST_LOOP ||
	    optname == IP_RECVORIGDSTADDR) {
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else if (optlen >= sizeof(char)) {
			unsigned char ucval;

			if (get_user(ucval, (unsigned char __user *) optval))
				return -EFAULT;
			val = (int) ucval;
		}
	}

	/* If optlen==0, it is equivalent to val == 0 */

	if (ip_mroute_opt(optname))
		return ip_mroute_setsockopt(sk, optname, optval, optlen);

	err = 0;
	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		struct ip_options * opt = NULL;
		if (optlen > 40 || optlen < 0)
			goto e_inval;
		err = ax8netfilter_ip_options_get_from_user(sock_net(sk), &opt,
					       optval, optlen);
		if (err)
			break;
		if (inet->is_icsk) {
			struct inet_connection_sock *icsk = inet_csk(sk);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
			if (sk->sk_family == PF_INET ||
			    (!((1 << sk->sk_state) &
			       (TCPF_LISTEN | TCPF_CLOSE)) &&
			     inet->daddr != LOOPBACK4_IPV6)) {
#endif
				if (inet->opt)
					icsk->icsk_ext_hdr_len -= inet->opt->optlen;
				if (opt)
					icsk->icsk_ext_hdr_len += opt->optlen;
				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
			}
#endif
		}
		opt = xchg(&inet->opt, opt);
		kfree(opt);
		break;
	}
	case IP_PKTINFO:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PKTINFO;
		else
			inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
		break;
	case IP_RECVTTL:
		if (val)
			inet->cmsg_flags |=  IP_CMSG_TTL;
		else
			inet->cmsg_flags &= ~IP_CMSG_TTL;
		break;
	case IP_RECVTOS:
		if (val)
			inet->cmsg_flags |=  IP_CMSG_TOS;
		else
			inet->cmsg_flags &= ~IP_CMSG_TOS;
		break;
	case IP_RECVOPTS:
		if (val)
			inet->cmsg_flags |=  IP_CMSG_RECVOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
		break;
	case IP_RETOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RETOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
		break;
	case IP_PASSSEC:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PASSSEC;
		else
			inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
		break;
	case IP_RECVORIGDSTADDR:
		if (val)
			inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
		else
			inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
		break;
	case IP_TOS:	/* This sets both TOS and Precedence */
		if (sk->sk_type == SOCK_STREAM) {
			val &= ~3;
			val |= inet->tos & 3;
		}
		if (inet->tos != val) {
			inet->tos = val;
			sk->sk_priority = ax8netfilter_rt_tos2priority(val);
			sk_dst_reset(sk);
		}
		break;
	case IP_TTL:
		if (optlen<1)
			goto e_inval;
		if (val != -1 && (val < 1 || val>255))
			goto e_inval;
		inet->uc_ttl = val;
		break;
	case IP_HDRINCL:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->hdrincl = val ? 1 : 0;
		break;
	case IP_MTU_DISCOVER:
		if (val<0 || val>3)
			goto e_inval;
		inet->pmtudisc = val;
		break;
	case IP_RECVERR:
		inet->recverr = !!val;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		break;
	case IP_MULTICAST_TTL:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (optlen<1)
			goto e_inval;
		if (val == -1)
			val = 1;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->mc_ttl = val;
		break;
	case IP_MULTICAST_LOOP:
		if (optlen<1)
			goto e_inval;
		inet->mc_loop = !!val;
		break;
	case IP_MULTICAST_IF:
	{
		struct ip_mreqn mreq;
		struct net_device *dev = NULL;

		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		/*
		 *	Check the arguments are allowable
		 */

		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (optlen >= sizeof(struct in_addr) &&
			    copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr)))
				break;
		}

		if (!mreq.imr_ifindex) {
			if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
				inet->mc_index = 0;
				inet->mc_addr  = 0;
				err = 0;
				break;
			}
			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
			if (dev) {
				mreq.imr_ifindex = dev->ifindex;
				dev_put(dev);
			}
		} else
			dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);


		err = -EADDRNOTAVAIL;
		if (!dev)
			break;

		err = -EINVAL;
		if (sk->sk_bound_dev_if &&
		    mreq.imr_ifindex != sk->sk_bound_dev_if)
			break;

		inet->mc_index = mreq.imr_ifindex;
		inet->mc_addr  = mreq.imr_address.s_addr;
		err = 0;
		break;
	}

	case IP_ADD_MEMBERSHIP:
	case IP_DROP_MEMBERSHIP:
	{
		struct ip_mreqn mreq;

		err = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		if (optlen < sizeof(struct ip_mreq))
			goto e_inval;
		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
				break;
		}

		if (optname == IP_ADD_MEMBERSHIP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ax8netfilter_ip_mc_leave_group(sk, &mreq);
		break;
	}
	case IP_MSFILTER:
	{
		extern int *ax8netfilter_sysctl_igmp_max_msf;
		struct ip_msfilter *msf;

		if (optlen < IP_MSFILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		msf = kmalloc(optlen, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(msf, optval, optlen)) {
			kfree(msf);
			break;
		}
		/* numsrc >= (1G-4) overflow in 32 bits */
		if (msf->imsf_numsrc >= 0x3ffffffcU ||
		    msf->imsf_numsrc > (*ax8netfilter_sysctl_igmp_max_msf)) {
			kfree(msf);
			err = -ENOBUFS;
			break;
		}
		if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
			kfree(msf);
			err = -EINVAL;
			break;
		}
		err = ax8netfilter_ip_mc_msfilter(sk, msf, 0);
		kfree(msf);
		break;
	}
	case IP_BLOCK_SOURCE:
	case IP_UNBLOCK_SOURCE:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	{
		struct ip_mreq_source mreqs;
		int omode, add;

		if (optlen != sizeof(struct ip_mreq_source))
			goto e_inval;
		if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
			err = -EFAULT;
			break;
		}
		if (optname == IP_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == IP_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
			struct ip_mreqn mreq;

			mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
			mreq.imr_address.s_addr = mreqs.imr_interface;
			mreq.imr_ifindex = 0;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ax8netfilter_ip_mc_source(add, omode, sk, &mreqs, 0);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in *psin;
		struct ip_mreqn mreq;

		if (optlen < sizeof(struct group_req))
			goto e_inval;
		err = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(greq)))
			break;
		psin = (struct sockaddr_in *)&greq.gr_group;
		if (psin->sin_family != AF_INET)
			goto e_inval;
		memset(&mreq, 0, sizeof(mreq));
		mreq.imr_multiaddr = psin->sin_addr;
		mreq.imr_ifindex = greq.gr_interface;

		if (optname == MCAST_JOIN_GROUP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ax8netfilter_ip_mc_leave_group(sk, &mreq);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		struct ip_mreq_source mreqs;
		struct sockaddr_in *psin;
		int omode, add;

		if (optlen != sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			err = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET ||
		    greqs.gsr_source.ss_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			break;
		}
		psin = (struct sockaddr_in *)&greqs.gsr_group;
		mreqs.imr_multiaddr = psin->sin_addr.s_addr;
		psin = (struct sockaddr_in *)&greqs.gsr_source;
		mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
		mreqs.imr_interface = 0; /* use index for mc_source */

		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct ip_mreqn mreq;

			psin = (struct sockaddr_in *)&greqs.gsr_group;
			mreq.imr_multiaddr = psin->sin_addr;
			mreq.imr_address.s_addr = 0;
			mreq.imr_ifindex = greqs.gsr_interface;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			greqs.gsr_interface = mreq.imr_ifindex;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ax8netfilter_ip_mc_source(add, omode, sk, &mreqs,
				   greqs.gsr_interface);
		break;
	}
	case MCAST_MSFILTER:
	{
		extern int * ax8netfilter_sysctl_igmp_max_msf;
		struct sockaddr_in *psin;
		struct ip_msfilter *msf = NULL;
		struct group_filter *gsf = NULL;
		int msize, i, ifindex;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen, GFP_KERNEL);
		if (!gsf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(gsf, optval, optlen)) {
			goto mc_msf_out;
		}
		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffff ||
		    gsf->gf_numsrc > (*ax8netfilter_sysctl_igmp_max_msf)) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			err = -EINVAL;
			goto mc_msf_out;
		}
		msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
		msf = kmalloc(msize, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		ifindex = gsf->gf_interface;
		psin = (struct sockaddr_in *)&gsf->gf_group;
		if (psin->sin_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			goto mc_msf_out;
		}
		msf->imsf_multiaddr = psin->sin_addr.s_addr;
		msf->imsf_interface = 0;
		msf->imsf_fmode = gsf->gf_fmode;
		msf->imsf_numsrc = gsf->gf_numsrc;
		err = -EADDRNOTAVAIL;
		for (i=0; i<gsf->gf_numsrc; ++i) {
			psin = (struct sockaddr_in *)&gsf->gf_slist[i];

			if (psin->sin_family != AF_INET)
				goto mc_msf_out;
			msf->imsf_slist[i] = psin->sin_addr.s_addr;
		}
		kfree(gsf);
		gsf = NULL;

		err = ax8netfilter_ip_mc_msfilter(sk, msf, ifindex);
	mc_msf_out:
		kfree(msf);
		kfree(gsf);
		break;
	}
	case IP_ROUTER_ALERT:
		err = ax8netfilter_ip_ra_control(sk, val ? 1 : 0, NULL);
		break;

	case IP_FREEBIND:
		if (optlen<1)
			goto e_inval;
		inet->freebind = !!val;
		break;

	case IP_IPSEC_POLICY:
	case IP_XFRM_POLICY:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		err = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IP_TRANSPARENT:
		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (optlen < 1)
			goto e_inval;
		inet->transparent = !!val;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return err;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
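
As one concrete path through this function, IP_ADD_MEMBERSHIP arrives as an ip_mreqn (or legacy ip_mreq) copied in from user memory and is handed to ip_mc_join_group(). A minimal userspace counterpart; the group address is an example value:

/* Userspace sketch: join a multicast group; the kernel side is the
 * IP_ADD_MEMBERSHIP case above. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

int main(void)
{
	struct ip_mreqn mreq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = inet_addr("239.1.2.3"); /* example group */
	mreq.imr_address.s_addr   = htonl(INADDR_ANY);      /* any local addr */
	mreq.imr_ifindex          = 0;                      /* let kernel pick */

	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0) {
		perror("IP_ADD_MEMBERSHIP");
		return 1;
	}
	close(fd);
	return 0;
}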
Example #8
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL))
		return -EOPNOTSUPP;

	if (!is_owner_or_cap(inode))
		return -EACCES;

	mutex_lock(&inode->i_mutex);

	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	ret = mnt_want_write(file->f_path.mnt);
	if (ret)
		goto out_unlock;

	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;


	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);

	btrfs_update_iflags(inode);
	inode->i_ctime = CURRENT_TIME;
	btrfs_end_transaction(trans, root);

	mnt_drop_write(file->f_path.mnt);
 out_unlock:
	mutex_unlock(&inode->i_mutex);
	return ret;	/* propagate -EPERM and mnt_want_write() failures */
}
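
Userspace reaches this handler through the generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls, the same interface chattr(1) uses. A small sketch that sets FS_NOATIME_FL; the file name is a placeholder:

/* Userspace sketch: flip an inode flag via FS_IOC_GETFLAGS/SETFLAGS. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	unsigned int flags;	/* the handler above copies 4 bytes */
	int fd = open("testfile", O_RDONLY);	/* placeholder path */

	if (fd < 0)
		return 1;
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		return 1;
	}
	flags |= FS_NOATIME_FL;
	/* changing IMMUTABLE/APPEND instead would demand CAP_LINUX_IMMUTABLE */
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
		perror("FS_IOC_SETFLAGS");
	close(fd);
	return 0;
}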
Example #9
/**
 * cap_settime - Determine whether the current process may set the system clock
 * @ts: The time to set
 * @tz: The timezone to set
 *
 * Determine whether the current process may set the system clock and timezone
 * information, returning 0 if permission granted, -ve if denied.
 */
int cap_settime(struct timespec *ts, struct timezone *tz)
{
	if (!capable(CAP_SYS_TIME))
		return -EPERM;
	return 0;
}
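
This hook is what turns an unprivileged settimeofday(2) into -EPERM. A trivial userspace probe that re-sets the current time, which is harmless but still requires CAP_SYS_TIME:

/* Userspace sketch: exercise the cap_settime() hook. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/time.h>

int main(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) < 0)
		return 1;
	/* Re-setting the current time changes nothing observable,
	 * yet still goes through the capability check. */
	if (settimeofday(&tv, NULL) < 0)
		printf("settimeofday: %s\n", strerror(errno));
	else
		printf("clock set: CAP_SYS_TIME is available\n");
	return 0;
}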
Example #10
static int slcan_open(struct tty_struct *tty)
{
	struct slcan *sl;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	/* RTnetlink lock is misused here to serialize concurrent
	   opens of slcan channels. There are better ways, but it is
	   the simplest one.
	 */
	rtnl_lock();

	/* Collect hanged up channels. */
	slc_sync();

	sl = tty->disc_data;

	err = -EEXIST;
	/* First make sure we're not already connected. */
	if (sl && sl->magic == SLCAN_MAGIC)
		goto err_exit;

	/* OK.  Find a free SLCAN channel to use. */
	err = -ENFILE;
	sl = slc_alloc();
	if (sl == NULL)
		goto err_exit;

	sl->tty = tty;
	tty->disc_data = sl;

	if (!test_bit(SLF_INUSE, &sl->flags)) {
		/* Perform the low-level SLCAN initialization. */
		sl->rcount   = 0;
		sl->xleft    = 0;

		set_bit(SLF_INUSE, &sl->flags);

		err = register_netdevice(sl->dev);
		if (err)
			goto err_free_chan;
	}

	/* Done.  We have linked the TTY line to a channel. */
	rtnl_unlock();
	tty->receive_room = 65536;	/* We don't flow control */

	/* TTY layer expects 0 on success */
	return 0;

err_free_chan:
	sl->tty = NULL;
	tty->disc_data = NULL;
	clear_bit(SLF_INUSE, &sl->flags);

err_exit:
	rtnl_unlock();

	/* Count references from TTY module */
	return err;
}
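
slcan_open() runs when a process attaches the SLCAN line discipline to a serial tty, as the slcand daemon does via TIOCSETD. A hedged sketch; the tty path is a placeholder, and N_SLCAN is defined locally in case the toolchain's <linux/tty.h> lacks it:

/* Userspace sketch: attach the SLCAN line discipline, invoking
 * slcan_open() above. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef N_SLCAN
#define N_SLCAN 17	/* assumption: value from linux/tty.h */
#endif

int main(void)
{
	int ldisc = N_SLCAN;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);	/* placeholder */

	if (fd < 0)
		return 1;
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("TIOCSETD");	/* EPERM without CAP_NET_ADMIN */
		return 1;
	}
	/* The ldisc stays attached while fd is open; a real daemon
	 * would now loop instead of exiting. */
	pause();
	return 0;
}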
Example #11
static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct ip6_tnl *t = NULL;
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
				err = -EFAULT;
				break;
			}
			t = ip6_tnl_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof (p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
			err = -EFAULT;
		}
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
		if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);

			ip6_tnl_unlink(ip6n, t);
			err = ip6_tnl_change(t, &p);
			ip6_tnl_link(ip6n, t);
			netdev_state_change(dev);
		}
		if (t) {
			err = 0;
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
				err = -EFAULT;

		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
				break;
			err = -ENOENT;
			if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
Example #12
int scull_p_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int retval = 0, tmp, err = 0;

	/* extract the type and number bitfields, and don't
	 * decode wrong cmds: return ENOTTY before access_ok()
	 */
	if (_IOC_TYPE(cmd) != SCULL_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > SCULL_IOC_MAXNR)
		return -ENOTTY;

	/* the direction field is a bitmask (2 bits), and
	 * VERIFY_WRITE catches R/W transfers. The 'direction'
	 * bitfield is user-oriented, while access_ok() is
	 * kernel-oriented, so the concept of "read" and
	 * "write" is reversed.
	 *
	 * access_ok() returns nonzero on success and 0 on error.
	 */
	if (_IOC_DIR(cmd) & _IOC_READ)
		err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
	else if (_IOC_DIR(cmd) & _IOC_WRITE)
		err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
	if (err)
		return -EFAULT;

	switch (cmd) {
	case SCULL_IOCRESET:
		scull_quantum = SCULL_QUANTUM;
		scull_qset = SCULL_QSET;
		break;

	case SCULL_IOCSQUANTUM: /* Set: arg points to the value */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		retval = __get_user(scull_quantum, (int __user *)arg);
		break;

	case SCULL_IOCTQUANTUM: /* Tell: arg is the value */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		scull_quantum = arg;
		break;

	case SCULL_IOCGQUANTUM: /* Get: arg is pointer to result */
		retval = __put_user(scull_quantum, (int __user *)arg);
		break;

	case SCULL_IOCQQUANTUM: /* Query: return it (it's positive) */
		return scull_quantum;

	case SCULL_IOCXQUANTUM: /* eXchange: use arg as pointer */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		tmp = scull_quantum;
		retval = __get_user(scull_quantum, (int __user *)arg);
		if (retval == 0)
			retval = __put_user(tmp, (int __user *)arg);
		break;

	case SCULL_IOCHQUANTUM: /* sHift: like Tell + Query */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		tmp = scull_quantum;
		scull_quantum = arg;
		return tmp;

	default: /* redundant, as cmd was checked against MAXNR */
		return -ENOTTY;
	}
	return retval;
}
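
The matching userspace calls are plain ioctl(2)s built with the same _IO* macros. A sketch assuming the device node and the SCULL_IOC* definitions from the LDD3 scull sample, reproduced inline so the program is self-contained:

/* Userspace sketch: drive scull's quantum ioctls. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define SCULL_IOC_MAGIC  'k'	/* assumption: as in the LDD3 scull.h */
#define SCULL_IOCSQUANTUM _IOW(SCULL_IOC_MAGIC, 1, int)
#define SCULL_IOCGQUANTUM _IOR(SCULL_IOC_MAGIC, 5, int)

int main(void)
{
	int quantum = 4096, fd;

	fd = open("/dev/scull0", O_RDWR);	/* placeholder node */
	if (fd < 0)
		return 1;
	/* Set: needs CAP_SYS_ADMIN, else the driver returns -EPERM */
	if (ioctl(fd, SCULL_IOCSQUANTUM, &quantum) < 0)
		perror("SCULL_IOCSQUANTUM");
	/* Get: allowed for everyone */
	if (ioctl(fd, SCULL_IOCGQUANTUM, &quantum) == 0)
		printf("quantum = %d\n", quantum);
	close(fd);
	return 0;
}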
Example #13
static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct bnep_connlist_req cl;
	struct bnep_connadd_req  ca;
	struct bnep_conndel_req  cd;
	struct bnep_conninfo ci;
	struct socket *nsock;
	void __user *argp = (void __user *)arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case BNEPCONNADD:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (copy_from_user(&ca, argp, sizeof(ca)))
			return -EFAULT;

		nsock = sockfd_lookup(ca.sock, &err);
		if (!nsock)
			return err;

		if (nsock->sk->sk_state != BT_CONNECTED) {
			sockfd_put(nsock);
			return -EBADFD;
		}
		ca.device[sizeof(ca.device)-1] = 0;

		err = bnep_add_connection(&ca, nsock);
		if (!err) {
			if (copy_to_user(argp, &ca, sizeof(ca)))
				err = -EFAULT;
		} else
			sockfd_put(nsock);

		return err;

	case BNEPCONNDEL:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (copy_from_user(&cd, argp, sizeof(cd)))
			return -EFAULT;

		return bnep_del_connection(&cd);

	case BNEPGETCONNLIST:
		if (copy_from_user(&cl, argp, sizeof(cl)))
			return -EFAULT;

		if (cl.cnum <= 0)
			return -EINVAL;

		err = bnep_get_connlist(&cl);
		if (!err && copy_to_user(argp, &cl, sizeof(cl)))
			return -EFAULT;

		return err;

	case BNEPGETCONNINFO:
		if (copy_from_user(&ci, argp, sizeof(ci)))
			return -EFAULT;

		err = bnep_get_conninfo(&ci);
		if (!err && copy_to_user(argp, &ci, sizeof(ci)))
			return -EFAULT;

		return err;

	default:
		return -EINVAL;
	}

	return 0;
}
Example #14
asmlinkage int
sunos_mount(char __user *type, char __user *dir, int flags, void __user *data)
{
	int linux_flags = 0;
	int ret = -EINVAL;
	char *dev_fname = NULL;
	char *dir_page, *type_page;

	if (!capable (CAP_SYS_ADMIN))
		return -EPERM;
		
	lock_kernel();
	/* We don't handle the integer fs type */
	if ((flags & SMNT_NEWTYPE) == 0)
		goto out;

	/* Do not allow for those flags we don't support */
	if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
		goto out;

	if (flags & SMNT_REMOUNT)
		linux_flags |= MS_REMOUNT;
	if (flags & SMNT_RDONLY)
		linux_flags |= MS_RDONLY;
	if (flags & SMNT_NOSUID)
		linux_flags |= MS_NOSUID;

	dir_page = getname(dir);
	ret = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out;

	type_page = getname(type);
	ret = PTR_ERR(type_page);
	if (IS_ERR(type_page))
		goto out1;

	if (strcmp(type_page, "ext2") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "iso9660") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "minix") == 0) {
		dev_fname = getname(data);
	} else if (strcmp(type_page, "nfs") == 0) {
		ret = sunos_nfs_mount (dir_page, flags, data);
		goto out2;
        } else if (strcmp(type_page, "ufs") == 0) {
		printk("Warning: UFS filesystem mounts unsupported.\n");
		ret = -ENODEV;
		goto out2;
	} else if (strcmp(type_page, "proc")) {	/* anything but "proc" is rejected */
		ret = -ENODEV;
		goto out2;
	}
	ret = PTR_ERR(dev_fname);
	if (IS_ERR(dev_fname))
		goto out2;
	ret = do_mount(dev_fname, dir_page, type_page, linux_flags, NULL);
	if (dev_fname)
		putname(dev_fname);
out2:
	putname(type_page);
out1:
	putname(dir_page);
out:
	unlock_kernel();
	return ret;
}
Example #15
int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	int err;
	struct arpreq r;
	struct net_device *dev = NULL;

	switch (cmd) {
		case SIOCDARP:
		case SIOCSARP:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
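			/* deliberate fall through: all three commands copy the arpreq in */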
		case SIOCGARP:
			err = copy_from_user(&r, arg, sizeof(struct arpreq));
			if (err)
				return -EFAULT;
			break;
		default:
			return -EINVAL;
	}

	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;

	if (!(r.arp_flags & ATF_PUBL) &&
	    (r.arp_flags & (ATF_NETMASK|ATF_DONTPUB)))
		return -EINVAL;
	if (!(r.arp_flags & ATF_NETMASK))
		((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
							   htonl(0xFFFFFFFFUL);
	rtnl_lock();
	if (r.arp_dev[0]) {
		err = -ENODEV;
		if ((dev = __dev_get_by_name(net, r.arp_dev)) == NULL)
			goto out;

		/* Mmmm... It is wrong... ARPHRD_NETROM==0 */
		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		err = -EINVAL;
		if ((r.arp_flags & ATF_COM) && r.arp_ha.sa_family != dev->type)
			goto out;
	} else if (cmd == SIOCGARP) {
		err = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case SIOCDARP:
		err = arp_req_delete(net, &r, dev);
		break;
	case SIOCSARP:
		err = arp_req_set(net, &r, dev);
		break;
	case SIOCGARP:
		err = arp_req_get(&r, dev);
		if (!err && copy_to_user(arg, &r, sizeof(r)))
			err = -EFAULT;
		break;
	}
out:
	rtnl_unlock();
	return err;
}
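
The unprivileged SIOCGARP branch can be exercised with any datagram socket. A minimal sketch; the IP address and the "eth0" device name are placeholders:

/* Userspace sketch: query an ARP entry via SIOCGARP. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if_arp.h>
#include <unistd.h>

int main(void)
{
	struct arpreq req;
	struct sockaddr_in *sin;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	sin = (struct sockaddr_in *)&req.arp_pa;
	sin->sin_family = AF_INET;	/* must be AF_INET, see above */
	sin->sin_addr.s_addr = inet_addr("192.0.2.1");	/* placeholder */
	strncpy(req.arp_dev, "eth0", sizeof(req.arp_dev) - 1);

	if (ioctl(fd, SIOCGARP, &req) < 0) {
		perror("SIOCGARP");
		return 1;
	}
	printf("hw addr family %d, flags 0x%x\n",
	       req.arp_ha.sa_family, req.arp_flags);
	close(fd);
	return 0;
}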
Example #16
asmlinkage long
sys_pciconfig_read(unsigned long bus, unsigned long dfn,
		   unsigned long off, unsigned long len,
		   void __user *buf)
{
	struct pci_dev *dev;
	u8 byte;
	u16 word;
	u32 dword;
	long err, cfg_ret;

	err = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto error;

	err = -ENODEV;
	dev = pci_find_slot(bus, dfn);
	if (!dev)
		goto error;

	lock_kernel();
	switch (len) {
	case 1:
		cfg_ret = pci_read_config_byte(dev, off, &byte);
		break;
	case 2:
		cfg_ret = pci_read_config_word(dev, off, &word);
		break;
	case 4:
		cfg_ret = pci_read_config_dword(dev, off, &dword);
		break;
	default:
		err = -EINVAL;
		unlock_kernel();
		goto error;
	}
	unlock_kernel();

	err = -EIO;
	if (cfg_ret != PCIBIOS_SUCCESSFUL)
		goto error;

	switch (len) {
	case 1:
		err = put_user(byte, (unsigned char __user *)buf);
		break;
	case 2:
		err = put_user(word, (unsigned short __user *)buf);
		break;
	case 4:
		err = put_user(dword, (unsigned int __user *)buf);
		break;
	}
	return err;

error:
	/* ??? XFree86 doesn't even check the return value.  They
	   just look for 0xffffffff in the output, since that's what
	   they get instead of a machine check on x86.  */
	switch (len) {
	case 1:
		put_user(-1, (unsigned char __user *)buf);
		break;
	case 2:
		put_user(-1, (unsigned short __user *)buf);
		break;
	case 4:
		put_user(-1, (unsigned int __user *)buf);
		break;
	}
	return err;
}
Example #17
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
#endif

	switch (cmd) {
	case VIDIOC_QUERYCTRL:
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);

	case VIDIOC_S_EXT_CTRLS:
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);

	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: =================  START STATUS  =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ==================  END STATUS  ==================\n",
			sd->name);
		return ret;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY &&
		    format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (format->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY &&
		    format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (format->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;
		int rval;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
		    crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (crop->pad >= sd->entity.num_pads)
			return -EINVAL;

		rval = v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
		if (rval != -ENOIOCTLCMD)
			return rval;

		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, subdev_fh, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;
		int rval;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
		    crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (crop->pad >= sd->entity.num_pads)
			return -EINVAL;

		rval = v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
		if (rval != -ENOIOCTLCMD)
			return rval;

		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, subdev_fh, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		if (code->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		if (fse->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
		return v4l2_subdev_call(sd, video, g_frame_interval, arg);

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL:
		return v4l2_subdev_call(sd, video, s_frame_interval, arg);

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		if (fie->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
		    sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (sel->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(
			sd, pad, get_selection, subdev_fh, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
		    sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
			return -EINVAL;

		if (sel->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(
			sd, pad, set_selection, subdev_fh, sel);
	}
#endif
	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}
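
On the userspace side these are ordinary ioctls against the subdevice node. A minimal sketch of the VIDIOC_QUERYCTRL path; the /dev/v4l-subdev0 path and the brightness control ID are placeholders:

/* Userspace sketch: enumerate a control on a V4L2 subdevice node. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <unistd.h>

int main(void)
{
	struct v4l2_queryctrl qc;
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* placeholder */

	if (fd < 0)
		return 1;

	memset(&qc, 0, sizeof(qc));
	qc.id = V4L2_CID_BRIGHTNESS;	/* example control */
	if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0)
		printf("%s: range [%d, %d], default %d\n",
		       qc.name, qc.minimum, qc.maximum, qc.default_value);
	else
		perror("VIDIOC_QUERYCTRL");
	close(fd);
	return 0;
}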
Example #18
/*
 * IOCTL
 */
int lighty_ioctl(struct inode * i_node, struct file * file, unsigned int cmd,
                                                            unsigned long arg)
{
	int err = 0;
	int retval = 0;
	struct urb *usb_led;
	char command; // a, b, c ,d, e, f
	char *buf;
	struct usb_lighty *dev = file->private_data;
       /*
        * extract the type and number bitfields, and don't decode
        * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
        */
        if (_IOC_TYPE(cmd) != LIGHTY_IOCTL_MAGIC)
        {
                printk(KERN_NOTICE
                        "lighty_ioctl: !lighty_IOC_MAGIC\n");
                return -ENOTTY;
        }
        if (_IOC_NR(cmd) > LIGHTY_IOCTL_MAX)
        {
                printk(KERN_NOTICE
                        "lighty_ioctl:  > lighty_IOC_MAXNR\n");
                return -ENOTTY;
        }

       /*
        * If not root/sysadmin, go away 
        */
        if (! capable (CAP_SYS_ADMIN))
                return -EPERM;

        /*
         * the direction is a bitmask, and VERIFY_WRITE catches R/W
         * transfers. `Type' is user-oriented, while
         * access_ok is kernel-oriented, so the concept of "read" and
         * "write" is reversed
         */
        if (_IOC_DIR(cmd) & _IOC_READ)
                err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
        else if (_IOC_DIR(cmd) & _IOC_WRITE)
                err =  !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
        if (err)
        {
                printk(KERN_NOTICE
                        "lighty_ioctl: access !ok\n");
                return -EFAULT;
        }


        switch(cmd) {
                case LIGHTY_IOCTL_1RED:
                        printk(KERN_NOTICE
                                "LIGHTY_IOCTL_1RED\n");
			command = 'a';
			break;
                case LIGHTY_IOCTL_1GREEN:
                        printk(KERN_NOTICE
                                "LIGHTY_IOCTL_1GREEN\n");
			command = 'b';
			break;
                case LIGHTY_IOCTL_1BLUE:
                        printk(KERN_NOTICE
                                "LIGHTY_IOCTL_1BLUE\n");
			command = 'c';
			break;
                case LIGHTY_IOCTL_2RED:
                        printk(KERN_NOTICE
                                "LIGHTY_IOCTL_2RED\n");
			command = 'd';
			break;
                case LIGHTY_IOCTL_2GREEN:
                        printk(KERN_NOTICE
                                "LIGHTY_IOCTL_2GREEN\n");
			command = 'e';
			break;
                case LIGHTY_IOCTL_2BLUE:
                        printk(KERN_NOTICE
                                "LIGHTY_IOCTL_2BLUE\n");
			command = 'f';
			break;
		default:
			printk(KERN_NOTICE
				"Not a known command %x\n", cmd);
			return -ENOTTY; /* unknown cmd: inappropriate ioctl, not an allocation failure */
	}
	usb_led = usb_alloc_urb(0, GFP_KERNEL);
	if (!usb_led) {
		return -ENOMEM;
	}
	buf = usb_buffer_alloc(dev->udev, 64, GFP_KERNEL, &usb_led->transfer_dma);
	if (!buf) {
		printk(KERN_NOTICE "usb_buffer_alloc failed\n");
		/* only the urb exists at this point; free it, not a
		 * transfer buffer that was never allocated */
		usb_free_urb(usb_led);
		return -ENOMEM;
	}
	buf[0] = command; // a, b, c, d, e or f
	usb_fill_int_urb(usb_led, dev->udev, usb_sndintpipe(dev->udev, dev->intr_out_endpointAddr), buf,
				64, (usb_complete_t)lighty_write_intr_callback, dev, 250);
	usb_led->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	retval = usb_submit_urb(usb_led, GFP_KERNEL);
	if (retval) {
		err("%s - failed submitting write urb, error %d", __FUNCTION__, retval);
		usb_buffer_free(dev->udev, 64, buf, usb_led->transfer_dma);
		usb_free_urb(usb_led);
		return retval;
	}
	/* drop our reference; the USB core holds its own until completion */
	usb_free_urb(usb_led);
	return 0;
}
}
Example #19
int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	struct sockaddr_in sin_orig;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
	struct in_device *in_dev;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	struct net_device *dev;
	char *colon;
	int ret = -EFAULT;
	int tryaddrmatch = 0;

	/*
	 *	Fetch the caller's info block into kernel space
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		goto out;
	ifr.ifr_name[IFNAMSIZ - 1] = 0;

	/* save original address for comparison */
	memcpy(&sin_orig, sin, sizeof(*sin));

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	dev_load(net, ifr.ifr_name);

	switch (cmd) {
	case SIOCGIFADDR:	/* Get interface address */
	case SIOCGIFBRDADDR:	/* Get the broadcast address */
	case SIOCGIFDSTADDR:	/* Get the destination address */
	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
		/* Note that these ioctls will not sleep,
		   so that we do not impose a lock.
		   One day we will be forced to put shlock here (I mean SMP)
		 */
		tryaddrmatch = (sin_orig.sin_family == AF_INET);
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		break;

	case SIOCSIFFLAGS:
		ret = -EACCES;
		if (!capable(CAP_NET_ADMIN))
			goto out;
		break;
	case SIOCSIFADDR:	/* Set interface address (and family) */
	case SIOCSIFBRDADDR:	/* Set the broadcast address */
	case SIOCSIFDSTADDR:	/* Set the destination address */
	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
		ret = -EACCES;
		if (!capable(CAP_NET_ADMIN))
			goto out;
		ret = -EINVAL;
		if (sin->sin_family != AF_INET)
			goto out;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	rtnl_lock();

	ret = -ENODEV;
	if ((dev = __dev_get_by_name(net, ifr.ifr_name)) == NULL)
		goto done;

	if (colon)
		*colon = ':';

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		if (tryaddrmatch) {
			/* Matthias Andree */
			/* compare label and address (4.4BSD style) */
			/* note: we only do this for a limited set of ioctls
			   and only if the original address family was AF_INET.
			   This is checked above. */
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next) {
				if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
				    sin_orig.sin_addr.s_addr ==
							ifa->ifa_address) {
					break; /* found */
				}
			}
		}
		/* we didn't get a match, maybe the application is
		   4.3BSD-style and passed in junk so we fall back to
		   comparing just the label */
		if (!ifa) {
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next)
				if (!strcmp(ifr.ifr_name, ifa->ifa_label))
					break;
		}
	}

	ret = -EADDRNOTAVAIL;
	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
		goto done;

	switch (cmd) {
	case SIOCGIFADDR:	/* Get interface address */
		sin->sin_addr.s_addr = ifa->ifa_local;
		goto rarok;

	case SIOCGIFBRDADDR:	/* Get the broadcast address */
		sin->sin_addr.s_addr = ifa->ifa_broadcast;
		goto rarok;

	case SIOCGIFDSTADDR:	/* Get the destination address */
		sin->sin_addr.s_addr = ifa->ifa_address;
		goto rarok;

	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
		sin->sin_addr.s_addr = ifa->ifa_mask;
		goto rarok;

	case SIOCSIFFLAGS:
		if (colon) {
			ret = -EADDRNOTAVAIL;
			if (!ifa)
				break;
			ret = 0;
			if (!(ifr.ifr_flags & IFF_UP))
				inet_del_ifa(in_dev, ifap, 1);
			break;
		}
		ret = dev_change_flags(dev, ifr.ifr_flags);
		break;

	case SIOCSIFADDR:	/* Set interface address (and family) */
		ret = -EINVAL;
		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
			break;

		if (!ifa) {
			ret = -ENOBUFS;
			if ((ifa = inet_alloc_ifa()) == NULL)
				break;
			if (colon)
				memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
			else
				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
		} else {
			ret = 0;
			if (ifa->ifa_local == sin->sin_addr.s_addr)
				break;
			inet_del_ifa(in_dev, ifap, 0);
			ifa->ifa_broadcast = 0;
			ifa->ifa_scope = 0;
		}

		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;

		if (!(dev->flags & IFF_POINTOPOINT)) {
			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
			if ((dev->flags & IFF_BROADCAST) &&
			    ifa->ifa_prefixlen < 31)
				ifa->ifa_broadcast = ifa->ifa_address |
						     ~ifa->ifa_mask;
		} else {
			ifa->ifa_prefixlen = 32;
			ifa->ifa_mask = inet_make_mask(32);
		}
		ret = inet_set_ifa(dev, ifa);
		break;

	case SIOCSIFBRDADDR:	/* Set the broadcast address */
		ret = 0;
		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
			inet_del_ifa(in_dev, ifap, 0);
			ifa->ifa_broadcast = sin->sin_addr.s_addr;
			inet_insert_ifa(ifa);
		}
		break;

	case SIOCSIFDSTADDR:	/* Set the destination address */
		ret = 0;
		if (ifa->ifa_address == sin->sin_addr.s_addr)
			break;
		ret = -EINVAL;
		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
			break;
		ret = 0;
		inet_del_ifa(in_dev, ifap, 0);
		ifa->ifa_address = sin->sin_addr.s_addr;
		inet_insert_ifa(ifa);
		break;

	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */

		/*
		 *	The mask we set must be legal.
		 */
		ret = -EINVAL;
		if (bad_mask(sin->sin_addr.s_addr, 0))
			break;
		ret = 0;
		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
			__be32 old_mask = ifa->ifa_mask;
			inet_del_ifa(in_dev, ifap, 0);
			ifa->ifa_mask = sin->sin_addr.s_addr;
			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);

			/* See if current broadcast address matches
			 * with current netmask, then recalculate
			 * the broadcast address. Otherwise it's a
			 * funny address, so don't touch it since
			 * the user seems to know what (s)he's doing...
			 */
			if ((dev->flags & IFF_BROADCAST) &&
			    (ifa->ifa_prefixlen < 31) &&
			    (ifa->ifa_broadcast ==
			     (ifa->ifa_local|~old_mask))) {
				ifa->ifa_broadcast = (ifa->ifa_local |
						      ~sin->sin_addr.s_addr);
			}
			inet_insert_ifa(ifa);
		}
		break;
	}
done:
	rtnl_unlock();
out:
	return ret;
rarok:
	rtnl_unlock();
	ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
	goto out;
}
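
The read-only branch (SIOCGIFADDR and friends) requires no privilege and is a single-struct ioctl from userspace. A minimal sketch; "eth0" is a placeholder:

/* Userspace sketch: fetch an interface address through SIOCGIFADDR,
 * served by devinet_ioctl() above. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	/* setting sin_family selects the label+address match path above */
	sin->sin_family = AF_INET;

	if (ioctl(fd, SIOCGIFADDR, &ifr) < 0) {
		perror("SIOCGIFADDR");
		return 1;
	}
	printf("%s: %s\n", ifr.ifr_name, inet_ntoa(sin->sin_addr));
	close(fd);
	return 0;
}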
Example #20
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->bts = NULL;

	p->stack_start = stack_start;

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			tty_kref_put(p->signal->tty);
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		__cleanup_signal(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
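
A note on the flag checks at the top of copy_process(): CLONE_THREAD requires CLONE_SIGHAND, which in turn requires CLONE_VM, so the minimal flag set for a thread-style clone from userspace is CLONE_VM | CLONE_SIGHAND | CLONE_THREAD. A hedged userspace sketch (illustrative only, not taken from this source):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int child_fn(void *arg)
{
	(void)arg;
	write(STDOUT_FILENO, "child running\n", 14);
	return 0;
}

int main(void)
{
	const size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);
	/* Violating either implication (e.g. CLONE_THREAD without
	 * CLONE_SIGHAND) makes copy_process() return -EINVAL. */
	int flags = CLONE_VM | CLONE_SIGHAND | CLONE_THREAD;

	if (!stack)
		return 1;
	/* The stack grows down on common architectures: pass the top. */
	if (clone(child_fn, stack + stack_size, flags, NULL) == -1) {
		perror("clone");
		return 1;
	}
	sleep(1);	/* crude: a CLONE_THREAD child cannot be reaped
			 * with waitpid(), so just give it time to run */
	return 0;
}
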
Example #21
long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct inode *inode = filp->f_dentry->d_inode;
    struct ext3_inode_info *ei = EXT3_I(inode);
    unsigned int flags;
    unsigned short rsv_window_size;

    ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);

    switch (cmd) {
    case EXT3_IOC_GETFLAGS:
        ext3_get_inode_flags(ei);
        flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
        return put_user(flags, (int __user *) arg);
    case EXT3_IOC_SETFLAGS: {
        handle_t *handle = NULL;
        int err;
        struct ext3_iloc iloc;
        unsigned int oldflags;
        unsigned int jflag;

        if (!is_owner_or_cap(inode))
            return -EACCES;

        if (get_user(flags, (int __user *) arg))
            return -EFAULT;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;

        flags = ext3_mask_flags(inode->i_mode, flags);

        mutex_lock(&inode->i_mutex);

        /* Is it quota file? Do not allow user to mess with it */
        err = -EPERM;
        if (IS_NOQUOTA(inode))
            goto flags_out;

        oldflags = ei->i_flags;

        /* The JOURNAL_DATA flag is modifiable only by root */
        jflag = flags & EXT3_JOURNAL_DATA_FL;

        /*
         * The IMMUTABLE and APPEND_ONLY flags can only be changed by
         * the relevant capability.
         *
         * This test looks nicer. Thanks to Pauline Middelink
         */
        if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
            if (!capable(CAP_LINUX_IMMUTABLE))
                goto flags_out;
        }

        /*
         * The JOURNAL_DATA flag can only be changed by
         * the relevant capability.
         */
        if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) {
            if (!capable(CAP_SYS_ADMIN))
                goto flags_out;
        }

        handle = ext3_journal_start(inode, 1);
        if (IS_ERR(handle)) {
            err = PTR_ERR(handle);
            goto flags_out;
        }
        if (IS_SYNC(inode))
            handle->h_sync = 1;
        err = ext3_reserve_inode_write(handle, inode, &iloc);
        if (err)
            goto flags_err;

        flags = flags & EXT3_FL_USER_MODIFIABLE;
        flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
        ei->i_flags = flags;

        ext3_set_inode_flags(inode);
        inode->i_ctime = CURRENT_TIME_SEC;

        err = ext3_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
        ext3_journal_stop(handle);
        if (err)
            goto flags_out;

        if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL))
            err = ext3_change_inode_journal_flag(inode, jflag);
flags_out:
        mutex_unlock(&inode->i_mutex);
        mnt_drop_write(filp->f_path.mnt);
        return err;
    }
    case EXT3_IOC_GETVERSION:
    case EXT3_IOC_GETVERSION_OLD:
        return put_user(inode->i_generation, (int __user *) arg);
    case EXT3_IOC_SETVERSION:
    case EXT3_IOC_SETVERSION_OLD: {
        handle_t *handle;
        struct ext3_iloc iloc;
        __u32 generation;
        int err;

        if (!is_owner_or_cap(inode))
            return -EPERM;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;
        if (get_user(generation, (int __user *) arg)) {
            err = -EFAULT;
            goto setversion_out;
        }

        handle = ext3_journal_start(inode, 1);
        if (IS_ERR(handle)) {
            err = PTR_ERR(handle);
            goto setversion_out;
        }
        err = ext3_reserve_inode_write(handle, inode, &iloc);
        if (err == 0) {
            inode->i_ctime = CURRENT_TIME_SEC;
            inode->i_generation = generation;
            err = ext3_mark_iloc_dirty(handle, inode, &iloc);
        }
        ext3_journal_stop(handle);
setversion_out:
        mnt_drop_write(filp->f_path.mnt);
        return err;
    }
#ifdef CONFIG_JBD_DEBUG
    case EXT3_IOC_WAIT_FOR_READONLY:
        /*
         * This is racy - by the time we're woken up and running,
         * the superblock could be released.  And the module could
         * have been unloaded.  So sue me.
         *
         * Returns 1 if it slept, else zero.
         */
    {
        struct super_block *sb = inode->i_sb;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
        if (timer_pending(&EXT3_SB(sb)->turn_ro_timer)) {
            schedule();
            ret = 1;
        }
        remove_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
        return ret;
    }
#endif
    case EXT3_IOC_GETRSVSZ:
        if (test_opt(inode->i_sb, RESERVATION)
                && S_ISREG(inode->i_mode)
                && ei->i_block_alloc_info) {
            rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
            return put_user(rsv_window_size, (int __user *)arg);
        }
        return -ENOTTY;
    case EXT3_IOC_SETRSVSZ: {
        int err;

        if (!test_opt(inode->i_sb, RESERVATION) || !S_ISREG(inode->i_mode))
            return -ENOTTY;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;

        if (!is_owner_or_cap(inode)) {
            err = -EACCES;
            goto setrsvsz_out;
        }

        if (get_user(rsv_window_size, (int __user *)arg)) {
            err = -EFAULT;
            goto setrsvsz_out;
        }

        if (rsv_window_size > EXT3_MAX_RESERVE_BLOCKS)
            rsv_window_size = EXT3_MAX_RESERVE_BLOCKS;

        /*
         * We need to allocate the reservation structure for this
         * inode before setting the window size.
         */
        mutex_lock(&ei->truncate_mutex);
        if (!ei->i_block_alloc_info)
            ext3_init_block_alloc_info(inode);

        if (ei->i_block_alloc_info) {
            struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
            rsv->rsv_goal_size = rsv_window_size;
        }
        mutex_unlock(&ei->truncate_mutex);
setrsvsz_out:
        mnt_drop_write(filp->f_path.mnt);
        return err;
    }
    case EXT3_IOC_GROUP_EXTEND: {
        ext3_fsblk_t n_blocks_count;
        struct super_block *sb = inode->i_sb;
        int err, err2;

        if (!capable(CAP_SYS_RESOURCE))
            return -EPERM;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;

        if (get_user(n_blocks_count, (__u32 __user *)arg)) {
            err = -EFAULT;
            goto group_extend_out;
        }
        err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count);
        journal_lock_updates(EXT3_SB(sb)->s_journal);
        err2 = journal_flush(EXT3_SB(sb)->s_journal);
        journal_unlock_updates(EXT3_SB(sb)->s_journal);
        if (err == 0)
            err = err2;
group_extend_out:
        mnt_drop_write(filp->f_path.mnt);
        return err;
    }
    case EXT3_IOC_GROUP_ADD: {
        struct ext3_new_group_data input;
        struct super_block *sb = inode->i_sb;
        int err, err2;

        if (!capable(CAP_SYS_RESOURCE))
            return -EPERM;

        err = mnt_want_write(filp->f_path.mnt);
        if (err)
            return err;

        if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg,
                           sizeof(input))) {
            err = -EFAULT;
            goto group_add_out;
        }

        err = ext3_group_add(sb, &input);
        journal_lock_updates(EXT3_SB(sb)->s_journal);
        err2 = journal_flush(EXT3_SB(sb)->s_journal);
        journal_unlock_updates(EXT3_SB(sb)->s_journal);
        if (err == 0)
            err = err2;
group_add_out:
        mnt_drop_write(filp->f_path.mnt);
        return err;
    }


    default:
        return -ENOTTY;
    }
}
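
The EXT3_IOC_GETFLAGS/EXT3_IOC_SETFLAGS pair above is what lsattr(1) and chattr(1) use; the ioctl numbers match the generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS. A small sketch that exercises the GETFLAGS case (assumes the file lives on ext3):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_GETFLAGS */

int main(int argc, char **argv)
{
	int fd, flags;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* On ext3 this is dispatched to the EXT3_IOC_GETFLAGS case
	 * above, which masks the result with EXT3_FL_USER_VISIBLE. */
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("ioctl(FS_IOC_GETFLAGS)");
		return 1;
	}
	printf("inode flags: %#x\n", flags);
	close(fd);
	return 0;
}
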
Example #22
static int ds_ioctl(struct inode * inode, struct file * file,
                    u_int cmd, u_long arg)
{
    socket_t i = MINOR(inode->i_rdev);
    socket_info_t *s;
    u_int size;
    int ret, err;
    ds_ioctl_arg_t buf;

    DEBUG(2, "ds_ioctl(socket %d, %#x, %#lx)\n", i, cmd, arg);

    if ((i >= sockets) || (sockets == 0))
        return -ENODEV;
    s = &socket_table[i];

    size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
    if (size > sizeof(ds_ioctl_arg_t)) return -EINVAL;

    /* Permission check */
    if (!(cmd & IOC_OUT) && !capable(CAP_SYS_ADMIN))
        return -EPERM;

#ifndef MACH
    if (cmd & IOC_IN) {
        err = verify_area(VERIFY_READ, (char *)arg, size);
        if (err) {
            DEBUG(3, "ds_ioctl(): verify_read = %d\n", err);
            return err;
        }
    }
    if (cmd & IOC_OUT) {
        err = verify_area(VERIFY_WRITE, (char *)arg, size);
        if (err) {
            DEBUG(3, "ds_ioctl(): verify_write = %d\n", err);
            return err;
        }
    }
#endif

    err = ret = 0;

#ifndef MACH
    if (cmd & IOC_IN) copy_from_user((char *)&buf, (char *)arg, size);
#else
    if (cmd & IOC_IN) memcpy((char *) &buf, (char *) arg, size);
#endif

    switch (cmd) {
    case DS_ADJUST_RESOURCE_INFO:
        ret = CardServices(AdjustResourceInfo, s->handle, &buf.adjust);
        break;
    case DS_GET_CARD_SERVICES_INFO:
        ret = CardServices(GetCardServicesInfo, &buf.servinfo);
        break;
    case DS_GET_CONFIGURATION_INFO:
        ret = CardServices(GetConfigurationInfo, s->handle, &buf.config);
        break;
    case DS_GET_FIRST_TUPLE:
        ret = CardServices(GetFirstTuple, s->handle, &buf.tuple);
        break;
    case DS_GET_NEXT_TUPLE:
        ret = CardServices(GetNextTuple, s->handle, &buf.tuple);
        break;
    case DS_GET_TUPLE_DATA:
        buf.tuple.TupleData = buf.tuple_parse.data;
        buf.tuple.TupleDataMax = sizeof(buf.tuple_parse.data);
        ret = CardServices(GetTupleData, s->handle, &buf.tuple);
        break;
    case DS_PARSE_TUPLE:
        buf.tuple.TupleData = buf.tuple_parse.data;
        ret = CardServices(ParseTuple, s->handle, &buf.tuple,
                           &buf.tuple_parse.parse);
        break;
    case DS_RESET_CARD:
        ret = CardServices(ResetCard, s->handle, NULL);
        break;
    case DS_GET_STATUS:
        ret = CardServices(GetStatus, s->handle, &buf.status);
        break;
    case DS_VALIDATE_CIS:
        ret = CardServices(ValidateCIS, s->handle, &buf.cisinfo);
        break;
    case DS_SUSPEND_CARD:
        ret = CardServices(SuspendCard, s->handle, NULL);
        break;
    case DS_RESUME_CARD:
        ret = CardServices(ResumeCard, s->handle, NULL);
        break;
    case DS_EJECT_CARD:
        ret = CardServices(EjectCard, s->handle, NULL);
        break;
    case DS_INSERT_CARD:
        ret = CardServices(InsertCard, s->handle, NULL);
        break;
    case DS_ACCESS_CONFIGURATION_REGISTER:
        if ((buf.conf_reg.Action == CS_WRITE) && !capable(CAP_SYS_ADMIN))
            return -EPERM;
        ret = CardServices(AccessConfigurationRegister, s->handle,
                           &buf.conf_reg);
        break;
    case DS_GET_FIRST_REGION:
        ret = CardServices(GetFirstRegion, s->handle, &buf.region);
        break;
    case DS_GET_NEXT_REGION:
        ret = CardServices(GetNextRegion, s->handle, &buf.region);
        break;
    case DS_GET_FIRST_WINDOW:
        buf.win_info.handle = (window_handle_t)s->handle;
        ret = CardServices(GetFirstWindow, &buf.win_info.handle,
                           &buf.win_info.window);
        break;
    case DS_GET_NEXT_WINDOW:
        ret = CardServices(GetNextWindow, &buf.win_info.handle,
                           &buf.win_info.window);
        break;
    case DS_GET_MEM_PAGE:
        ret = CardServices(GetMemPage, buf.win_info.handle,
                           &buf.win_info.map);
        break;
    case DS_REPLACE_CIS:
        ret = CardServices(ReplaceCIS, s->handle, &buf.cisdump);
        break;
    case DS_BIND_REQUEST:
        if (!capable(CAP_SYS_ADMIN)) return -EPERM;
        err = bind_request(i, &buf.bind_info);
        break;
    case DS_GET_DEVICE_INFO:
        err = get_device_info(i, &buf.bind_info, 1);
        break;
    case DS_GET_NEXT_DEVICE:
        err = get_device_info(i, &buf.bind_info, 0);
        break;
    case DS_UNBIND_REQUEST:
        err = unbind_request(i, &buf.bind_info);
        break;
    case DS_BIND_MTD:
        if (!capable(CAP_SYS_ADMIN)) return -EPERM;
        err = bind_mtd(i, &buf.mtd_info);
        break;
    default:
        err = -EINVAL;
    }

    if ((err == 0) && (ret != CS_SUCCESS)) {
        DEBUG(2, "ds_ioctl: ret = %d\n", ret);
        switch (ret) {
        case CS_BAD_SOCKET:
        case CS_NO_CARD:
            err = -ENODEV;
            break;
        case CS_BAD_ARGS:
        case CS_BAD_ATTRIBUTE:
        case CS_BAD_IRQ:
        case CS_BAD_TUPLE:
            err = -EINVAL;
            break;
        case CS_IN_USE:
            err = -EBUSY;
            break;
        case CS_OUT_OF_RESOURCE:
            err = -ENOSPC;
            break;
        case CS_NO_MORE_ITEMS:
            err = -ENODATA;
            break;
        case CS_UNSUPPORTED_FUNCTION:
            err = -ENOSYS;
            break;
        default:
            err = -EIO;
            break;
        }
    }

#ifndef MACH
    if (cmd & IOC_OUT) copy_to_user((char *)arg, (char *)&buf, size);
#else
    if (cmd & IOC_OUT) memcpy((char *) arg, (char *) &buf, size);
#endif

    return err;
} /* ds_ioctl */
Example #23
/*
 * ioctl calls for this driver. Why return -ENOTTY upon error? Because
 * POSIX says so!
 */
static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	/* Some sanity checks. */
	if (_IOC_TYPE(cmd) != RTC_MAGIC)
		return -ENOTTY;

	if (_IOC_NR(cmd) > RTC_MAX_IOCTL)
		return -ENOTTY;

	switch (cmd) {
	case RTC_RD_TIME:
	{
		struct rtc_time tm;

		mutex_lock(&rtc_lock);
		memset(&tm, 0, sizeof tm);
		get_rtc_time(&tm);

		if (copy_to_user((struct rtc_time *) arg, &tm,
				 sizeof tm)) {
			mutex_unlock(&rtc_lock);
			return -EFAULT;
		}

		mutex_unlock(&rtc_lock);

		return 0;
	}
	case RTC_SET_TIME:
	{
		int leap;
		int year;
		int century;
		struct rtc_time tm;

		memset(&tm, 0, sizeof tm);
		if (!capable(CAP_SYS_TIME))
			return -EPERM;

		if (copy_from_user(&tm, (struct rtc_time *) arg, sizeof tm))
			return -EFAULT;

		/* Convert from struct tm to struct rtc_time. */
		tm.tm_year += 1900;
		tm.tm_mon += 1;

		/*
		 * Check if tm.tm_year is a leap year. A year is a leap
		 * year if it is divisible by 4 but not 100, except
		 * that years divisible by 400 _are_ leap years.
		 */
		year = tm.tm_year;
		leap = (tm.tm_mon == 2) &&
			((year % 4 == 0 && year % 100 != 0) || year % 400 == 0);

		/* Perform some sanity checks. */
		if ((tm.tm_year < 1970) ||
		    (tm.tm_mon > 12) ||
		    (tm.tm_mday == 0) ||
		    (tm.tm_mday > days_in_month[tm.tm_mon] + leap) ||
		    (tm.tm_wday >= 7) ||
		    (tm.tm_hour >= 24) ||
		    (tm.tm_min >= 60) ||
		    (tm.tm_sec >= 60))
			return -EINVAL;

		century = (tm.tm_year >= 2000) ? 0x80 : 0;
		tm.tm_year = tm.tm_year % 100;

		tm.tm_year = bin2bcd(tm.tm_year);
		tm.tm_mon = bin2bcd(tm.tm_mon);
		tm.tm_mday = bin2bcd(tm.tm_mday);
		tm.tm_hour = bin2bcd(tm.tm_hour);
		tm.tm_min = bin2bcd(tm.tm_min);
		tm.tm_sec = bin2bcd(tm.tm_sec);
		tm.tm_mon |= century;

		mutex_lock(&rtc_lock);

		rtc_write(RTC_YEAR, tm.tm_year);
		rtc_write(RTC_MONTH, tm.tm_mon);
		rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */
		rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday);
		rtc_write(RTC_HOURS, tm.tm_hour);
		rtc_write(RTC_MINUTES, tm.tm_min);
		rtc_write(RTC_SECONDS, tm.tm_sec);

		mutex_unlock(&rtc_lock);

		return 0;
	}
	case RTC_VL_READ:
		if (voltage_low) {
			printk(KERN_ERR "%s: RTC Voltage Low - "
			       "reliable date/time information is no "
			       "longer guaranteed!\n", PCF8563_NAME);
		}

		if (copy_to_user((int *) arg, &voltage_low, sizeof(int)))
			return -EFAULT;
		return 0;

	case RTC_VL_CLR:
	{
		/* Clear the VL bit in the seconds register in case
		 * the time has not been set already (which would
		 * have cleared it). This does not really matter
		 * because of the cached voltage_low value but do it
		 * anyway for consistency. */

		int ret = rtc_read(RTC_SECONDS);

		rtc_write(RTC_SECONDS, (ret & 0x7F));

		/* Clear the cached value. */
		voltage_low = 0;

		return 0;
	}
	default:
		return -ENOTTY;
	}

	return 0;
}
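
Userspace reaches the RTC_RD_TIME case above through the RTC character device. A minimal sketch (the /dev/rtc node name is an assumption of the example):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Served by the RTC_RD_TIME case above under rtc_lock. */
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
		perror("ioctl(RTC_RD_TIME)");
		return 1;
	}
	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}
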
Example #24
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	int ret;

	switch (class) {
		case IOPRIO_CLASS_RT:
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			/* fall through, rt has prio field too */
		case IOPRIO_CLASS_BE:
			if (data >= IOPRIO_BE_NR || data < 0)
				return -EINVAL;

			break;
		case IOPRIO_CLASS_IDLE:
			break;
		case IOPRIO_CLASS_NONE:
			if (data)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
	}

	ret = -ESRCH;
	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = set_task_ioprio(p, ioprio);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					break;
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			if (!who)
				user = current_user();
			else
				user = find_user(who);

			if (!user)
				break;

			do_each_thread(g, p) {
				if (__task_cred(p)->uid != who)
					continue;
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					goto free_uid;
			} while_each_thread(g, p);
free_uid:
			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}
	rcu_read_unlock();
	return ret;
}
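
glibc provides no wrapper for ioprio_set(), so userspace invokes it via syscall(2). A sketch that hits the IOPRIO_WHO_PROCESS branch above (the IOPRIO_* constants are reproduced from include/linux/ioprio.h to keep the sketch self-contained):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Mirrors include/linux/ioprio.h; glibc ships no ioprio_set() wrapper. */
#define IOPRIO_CLASS_SHIFT		13
#define IOPRIO_PRIO_VALUE(cls, data)	(((cls) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_CLASS_BE			2
#define IOPRIO_WHO_PROCESS		1

int main(void)
{
	/* Best-effort class, level 4, for the calling process (who == 0
	 * takes the "p = current" shortcut in the handler above). */
	int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio) < 0) {
		perror("ioprio_set");
		return 1;
	}
	return 0;
}
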
Example #25
static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct bnep_connlist_req cl;
	struct bnep_connadd_req  ca;
	struct bnep_conndel_req  cd;
	struct bnep_conninfo ci;
	struct socket *nsock;
	void __user *argp = (void __user *)arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case BNEPCONNADD:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (copy_from_user(&ca, argp, sizeof(ca)))
			return -EFAULT;

		nsock = sockfd_lookup(ca.sock, &err);
		if (!nsock)
			return err;

		if (nsock->sk->sk_state != BT_CONNECTED) {
			sockfd_put(nsock);
			return -EBADFD;
		}
		ca.device[sizeof(ca.device)-1] = 0;

		err = bnep_add_connection(&ca, nsock);
		if (!err) {
			if (copy_to_user(argp, &ca, sizeof(ca)))
				err = -EFAULT;
		} else
			sockfd_put(nsock);

		return err;

	case BNEPCONNDEL:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (copy_from_user(&cd, argp, sizeof(cd)))
			return -EFAULT;

		return bnep_del_connection(&cd);

	case BNEPGETCONNLIST:
		if (copy_from_user(&cl, argp, sizeof(cl)))
			return -EFAULT;

		if (cl.cnum <= 0)
			return -EINVAL;

		err = bnep_get_connlist(&cl);
		if (!err && copy_to_user(argp, &cl, sizeof(cl)))
			return -EFAULT;

		return err;

	case BNEPGETCONNINFO:
		if (copy_from_user(&ci, argp, sizeof(ci)))
			return -EFAULT;

		err = bnep_get_conninfo(&ci);
		if (!err && copy_to_user(argp, &ci, sizeof(ci)))
			return -EFAULT;

		return err;

	case BNEPEXTENSION: {
			struct bnep_extension_req ext;
			struct sk_buff *skb;
			void __user *datap;

			if (copy_from_user(&ext, argp, sizeof(ext)))
				return -EFAULT;

			skb = alloc_skb(ext.data_len, GFP_KERNEL);
			if (!skb)
				return -ENOMEM;

			datap = (void __user *)((__u8*)argp + sizeof(ext));
			if (copy_from_user(skb_put(skb, ext.data_len), datap,
					ext.data_len)) {
				err = -EFAULT;
				goto ext_fin;
			}

			err = bnep_extension_req(ext.dst, skb);

ext_fin:
			kfree_skb(skb);
		}
		return err;

	default:
		return -EINVAL;
	}

	return 0;
}
Example #26
int
xfs_setattr_nonsize(
	struct xfs_inode	*ip,
	struct iattr		*iattr,
	int			flags)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			mask = iattr->ia_valid;
	xfs_trans_t		*tp;
	int			error;
	uid_t			uid = 0, iuid = 0;
	gid_t			gid = 0, igid = 0;
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
	struct xfs_dquot	*olddquot1 = NULL, *olddquot2 = NULL;

	trace_xfs_setattr(ip);

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return XFS_ERROR(EROFS);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = -inode_change_ok(inode, iattr);
	if (error)
		return XFS_ERROR(error);

	ASSERT((mask & ATTR_SIZE) == 0);

	/*
	 * If disk quotas are on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
		uint	qflags = 0;

		if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
			uid = iattr->ia_uid;
			qflags |= XFS_QMOPT_UQUOTA;
		} else {
			uid = ip->i_d.di_uid;
		}
		if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
			gid = iattr->ia_gid;
			qflags |= XFS_QMOPT_GQUOTA;
		}  else {
			gid = ip->i_d.di_gid;
		}

		/*
		 * We take a reference when we initialize udqp and gdqp,
		 * so it is important that we never blindly double trip on
		 * the same variable. See xfs_create() for an example.
		 */
		ASSERT(udqp == NULL);
		ASSERT(gdqp == NULL);
		error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
					 qflags, &udqp, &gdqp);
		if (error)
			return error;
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error)
		goto out_dqrele;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & (ATTR_UID|ATTR_GID)) {
		/*
		 * These IDs could have changed since we last looked at them.
		 * But, we're assured that if the ownership did change
		 * while we didn't have the inode locked, inode's dquot(s)
		 * would have changed also.
		 */
		iuid = ip->i_d.di_uid;
		igid = ip->i_d.di_gid;
		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;

		/*
		 * Do a quota reservation only if uid/gid is actually
		 * going to change.
		 */
		if (XFS_IS_QUOTA_RUNNING(mp) &&
		    ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
		     (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
			ASSERT(tp);
			error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
						capable(CAP_FOWNER) ?
						XFS_QMOPT_FORCE_RES : 0);
			if (error)	/* out of quota */
				goto out_trans_cancel;
		}
	}

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & (ATTR_UID|ATTR_GID)) {
		/*
		 * CAP_FSETID overrides the following restrictions:
		 *
		 * The set-user-ID and set-group-ID bits of a file will be
		 * cleared upon successful return from chown()
		 */
		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
		    !capable(CAP_FSETID))
			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);

		/*
		 * Change the ownerships and register quota modifications
		 * in the transaction.
		 */
		if (iuid != uid) {
			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
				ASSERT(mask & ATTR_UID);
				ASSERT(udqp);
				olddquot1 = xfs_qm_vop_chown(tp, ip,
							&ip->i_udquot, udqp);
			}
			ip->i_d.di_uid = uid;
			inode->i_uid = uid;
		}
		if (igid != gid) {
			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
				ASSERT(!XFS_IS_PQUOTA_ON(mp));
				ASSERT(mask & ATTR_GID);
				ASSERT(gdqp);
				olddquot2 = xfs_qm_vop_chown(tp, ip,
							&ip->i_gdquot, gdqp);
			}
			ip->i_d.di_gid = gid;
			inode->i_gid = gid;
		}
	}

	/*
	 * Change file access modes.
	 */
	if (mask & ATTR_MODE) {
		umode_t mode = iattr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;

		ip->i_d.di_mode &= S_IFMT;
		ip->i_d.di_mode |= mode & ~S_IFMT;

		inode->i_mode &= S_IFMT;
		inode->i_mode |= mode & ~S_IFMT;
	}

	/*
	 * Change file access or modified times.
	 */
	if (mask & ATTR_ATIME) {
		inode->i_atime = iattr->ia_atime;
		ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
		ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
		ip->i_update_core = 1;
	}
	if (mask & ATTR_CTIME) {
		inode->i_ctime = iattr->ia_ctime;
		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
		ip->i_update_core = 1;
	}
	if (mask & ATTR_MTIME) {
		inode->i_mtime = iattr->ia_mtime;
		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
		ip->i_update_core = 1;
	}

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(xs_ig_attrchg);

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot1);
	xfs_qm_dqrele(olddquot2);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	if (error)
		return XFS_ERROR(error);

	/*
	 * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
	 * 	     update.  We could avoid this with linked transactions
	 * 	     and passing down the transaction pointer all the way
	 *	     to attr_set.  No previous user of the generic
	 * 	     Posix ACL code seems to care about this issue either.
	 */
	if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
		error = -xfs_acl_chmod(inode);
		if (error)
			return XFS_ERROR(error);
	}

	return 0;

out_trans_cancel:
	xfs_trans_cancel(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_dqrele:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	return error;
}
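
At the VFS level this function is driven by ordinary chown(2)/chmod(2)/utimes(2) calls on files that live on XFS. A trivial sketch (the target path is an assumption):

#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-on-xfs>\n", argv[0]);
		return 1;
	}
	/* chown() arrives as ATTR_UID|ATTR_GID; note how the handler
	 * clears S_ISUID/S_ISGID unless the caller has CAP_FSETID. */
	if (chown(argv[1], getuid(), getgid()) < 0)
		perror("chown");
	/* chmod() arrives as ATTR_MODE. */
	if (chmod(argv[1], 0644) < 0)
		perror("chmod");
	return 0;
}
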
Example #27
static int proc_ide_write_settings(struct file *file, const char __user *buffer,
				   unsigned long count, void *data)
{
	ide_drive_t	*drive = (ide_drive_t *) data;
	char		name[MAX_LEN + 1];
	int		for_real = 0;
	unsigned long	n;
	ide_settings_t	*setting;
	char *buf, *s;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	proc_ide_settings_warn();

	if (count >= PAGE_SIZE)
		return -EINVAL;

	s = buf = (char *)__get_free_page(GFP_USER);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, buffer, count)) {
		free_page((unsigned long)buf);
		return -EFAULT;
	}

	buf[count] = '\0';

	/*
	 * Skip over leading whitespace
	 */
	while (count && isspace(*s)) {
		--count;
		++s;
	}
	/*
	 * Do one full pass to verify all parameters,
	 * then do another to actually write the new settings.
	 */
	do {
		char *p = s;
		n = count;
		while (n > 0) {
			unsigned val;
			char *q = p;

			while (n > 0 && *p != ':') {
				--n;
				p++;
			}
			if (*p != ':')
				goto parse_error;
			if (p - q > MAX_LEN)
				goto parse_error;
			memcpy(name, q, p - q);
			name[p - q] = 0;

			if (n > 0) {
				--n;
				p++;
			} else
				goto parse_error;

			val = simple_strtoul(p, &q, 10);
			n -= q - p;
			p = q;
			if (n > 0 && !isspace(*p))
				goto parse_error;
			while (n > 0 && isspace(*p)) {
				--n;
				++p;
			}

			down(&ide_setting_sem);
			setting = ide_find_setting_by_name(drive, name);
			if (!setting) {
				up(&ide_setting_sem);
				goto parse_error;
			}
			if (for_real)
				ide_write_setting(drive, setting, val * setting->div_factor / setting->mul_factor);
			up(&ide_setting_sem);
		}
	} while (!for_real++);
	free_page((unsigned long)buf);
	return count;
parse_error:
	free_page((unsigned long)buf);
	printk("proc_ide_write_settings(): parse error\n");
	return -EINVAL;
}
Example #28
STATIC      status_t
c4_ioctl (struct net_device * ndev, struct ifreq * ifr, int cmd)
{
    ci_t       *ci;
    void       *data;
    int         iocmd, iolen;
    status_t    ret;
    static struct data
    {
        union
        {
            u_int8_t c;
            u_int32_t i;
            struct sbe_brd_info bip;
            struct sbe_drv_info dip;
            struct sbe_iid_info iip;
            struct sbe_brd_addr bap;
            struct sbecom_chan_stats stats;
            struct sbecom_chan_param param;
            struct temux_card_stats cards;
            struct sbecom_card_param cardp;
            struct sbecom_framer_param frp;
        }           u;
    }           arg;

    if (!capable (CAP_SYS_ADMIN))
        return -EPERM;
    if (cmd != SIOCDEVPRIVATE + 15)
        return -EINVAL;
    if (!(ci = get_ci_by_dev (ndev)))
        return -EINVAL;
    if (ci->state != C_RUNNING)
        return -ENODEV;
    if (copy_from_user (&iocmd, ifr->ifr_data, sizeof (iocmd)))
        return -EFAULT;
#if 0
    if (copy_from_user (&len, ifr->ifr_data + sizeof (iocmd), sizeof (len)))
        return -EFAULT;
#endif

#if 0
    pr_info("c4_ioctl: iocmd %x, dir %x type %x nr %x iolen %d.\n", iocmd,
            _IOC_DIR (iocmd), _IOC_TYPE (iocmd), _IOC_NR (iocmd),
            _IOC_SIZE (iocmd));
#endif
    iolen = _IOC_SIZE (iocmd);
    data = ifr->ifr_data + sizeof (iocmd);
    if (copy_from_user (&arg, data, iolen))
        return -EFAULT;

    ret = 0;
    switch (iocmd)
    {
    case SBE_IOC_PORT_GET:
        //pr_info(">> SBE_IOC_PORT_GET Ioctl...\n");
        ret = do_get_port (ndev, data);
        break;
    case SBE_IOC_PORT_SET:
        //pr_info(">> SBE_IOC_PORT_SET Ioctl...\n");
        ret = do_set_port (ndev, data);
        break;
    case SBE_IOC_CHAN_GET:
        //pr_info(">> SBE_IOC_CHAN_GET Ioctl...\n");
        ret = do_get_chan (ndev, data);
        break;
    case SBE_IOC_CHAN_SET:
        //pr_info(">> SBE_IOC_CHAN_SET Ioctl...\n");
        ret = do_set_chan (ndev, data);
        break;
    case C4_DEL_CHAN:
        //pr_info(">> C4_DEL_CHAN Ioctl...\n");
        ret = do_del_chan (ndev, data);
        break;
    case SBE_IOC_CHAN_NEW:
        ret = do_create_chan (ndev, data);
        break;
    case SBE_IOC_CHAN_GET_STAT:
        ret = do_get_chan_stats (ndev, data);
        break;
    case SBE_IOC_LOGLEVEL:
        ret = do_set_loglevel (ndev, data);
        break;
    case SBE_IOC_RESET_DEV:
        ret = do_reset (ndev, data);
        break;
    case SBE_IOC_CHAN_DEL_STAT:
        ret = do_reset_chan_stats (ndev, data);
        break;
    case C4_LOOP_PORT:
        ret = do_port_loop (ndev, data);
        break;
    case C4_RW_FRMR:
        ret = do_framer_rw (ndev, data);
        break;
    case C4_RW_MSYC:
        ret = do_musycc_rw (ndev, data);
        break;
    case C4_RW_PLD:
        ret = do_pld_rw (ndev, data);
        break;
    case SBE_IOC_IID_GET:
        ret = (iolen == sizeof (struct sbe_iid_info)) ? c4_get_iidinfo (ci, &arg.u.iip) : -EFAULT;
        if (ret == 0)               /* no error, copy data */
            if (copy_to_user (data, &arg, iolen))
                return -EFAULT;
        break;
    default:
        //pr_info(">> c4_ioctl: EINVAL - unknown iocmd <%x>\n", iocmd);
        ret = -EINVAL;
        break;
    }
    return mkret (ret);
}
Example #29
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(trace, regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
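
The CLONE_VFORK branch above is what backs vfork(2): the parent sleeps on the completion until the child exits or execs. A minimal sketch:

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pid = vfork();

	if (pid < 0) {
		perror("vfork");
		return 1;
	}
	if (pid == 0)
		_exit(0);	/* child: must _exit() or exec, never return */
	/* The parent only runs again once the vfork completion fires. */
	printf("parent resumed after child %d exited\n", (int)pid);
	waitpid(pid, NULL, 0);
	return 0;
}
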
Example #30
asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
{
	int err, version;
	struct msg_queue *msq;
	struct msq_setbuf setbuf;
	struct kern_ipc_perm *ipcp;
	
	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_INFO: 
	case MSG_INFO: 
	{ 
		struct msginfo msginfo;
		int max_id;
		if (!buf)
			return -EFAULT;
		/* We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */

		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo,0,sizeof(msginfo));	
		msginfo.msgmni = msg_ctlmni;
		msginfo.msgmax = msg_ctlmax;
		msginfo.msgmnb = msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down(&msg_ids.sem);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids.in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids.max_id;
		up(&msg_ids.sem);
		if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;
		if (!buf)
			return -EFAULT;
		if(cmd == MSG_STAT && msqid >= msg_ids.size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		msq = msg_lock(msqid);
		if (msq == NULL)
			return -EINVAL;

		if(cmd == MSG_STAT) {
			success_return = msg_buildid(msqid, msq->q_perm.seq);
		} else {
			err = -EIDRM;
			if (msg_checkid(msq,msqid))
				goto out_unlock;
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms (&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime  = msq->q_stime;
		tbuf.msg_rtime  = msq->q_rtime;
		tbuf.msg_ctime  = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum   = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid  = msq->q_lspid;
		tbuf.msg_lrpid  = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user (&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return  -EINVAL;
	}

	down(&msg_ids.sem);
	msq = msg_lock(msqid);
	err=-EINVAL;
	if (msq == NULL)
		goto out_up;

	err = -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock_up;
	ipcp = &msq->q_perm;
	err = -EPERM;
	if (current->euid != ipcp->cuid && 
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
	    /* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | 
			(S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq,-EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders,0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		freeque (msq, msqid); 
		break;
	}
	err = 0;
out_up:
	up(&msg_ids.sem);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}
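
A userspace sketch exercising the IPC_STAT and IPC_RMID branches above (creates a throwaway private queue):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	struct msqid_ds ds;
	int msqid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

	if (msqid < 0) {
		perror("msgget");
		return 1;
	}
	/* IPC_STAT path: the kernel fills a msqid64_ds and converts it
	 * for the caller's ABI via copy_msqid_to_user(). */
	if (msgctl(msqid, IPC_STAT, &ds) < 0) {
		perror("msgctl(IPC_STAT)");
		return 1;
	}
	printf("qbytes=%lu qnum=%lu\n",
	       (unsigned long)ds.msg_qbytes, (unsigned long)ds.msg_qnum);
	/* IPC_RMID path: freeque() tears the queue down. */
	msgctl(msqid, IPC_RMID, NULL);
	return 0;
}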