Example #1
0
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
		char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}
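
For orientation, a minimal user-space sketch of how this handler is reached: the application opens a DCCP socket and calls setsockopt() at the SOL_DCCP level, which the kernel dispatches into do_dccp_setsockopt(). The option constants come from <linux/dccp.h>; the SOL_DCCP fallback define and the queue-length value are illustrative assumptions.

/* Minimal sketch (assumes a kernel with DCCP support and the uapi
 * <linux/dccp.h> header; error handling kept short). */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269	/* level value for DCCP socket options */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
	int qlen = 32;	/* illustrative TX queue length */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Ends up in the DCCP_SOCKOPT_QPOLICY_TXQLEN branch above. */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_TXQLEN,
		       &qlen, sizeof(qlen)) < 0)
		perror("setsockopt");
	return 0;
}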
Example #2
0
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
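
The read path is symmetric: getsockopt() with an int-sized buffer exercises the simple cases above. A small sketch, assuming the same headers and SOL_DCCP definition as in the previous example and a connected DCCP socket fd:

/* Sketch: query the current maximum packet size; the handler above copies
 * an int back and reports its size through optlen. */
static void print_cur_mps(int fd)
{
	int mps = 0;
	socklen_t len = sizeof(mps);

	if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_GET_CUR_MPS, &mps, &len) == 0)
		printf("current DCCP MPS: %d\n", mps);
	else
		perror("getsockopt(DCCP_SOCKOPT_GET_CUR_MPS)");
}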
Example #3
0
long qsc_modem_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int status, ret = 0, retry = 0;

	if (_IOC_TYPE(cmd) != CHARM_CODE) {
		pr_err("%s: invalid ioctl code\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
	switch (cmd) {
	case WAKE_CHARM:
		if (qsc_bootup_state == QSC_BOOT_MDM_BOOTLOAER_IRQ_REGISTERED ||
		    qsc_bootup_state == QSC_BOOT_MDM_BOOTLOAER_IRQ_RECEIVED) {
			do {
				retry++;
				ret = wait_for_completion_interruptible_timeout(
					&qsc_boot_after_mdm_booloader_irq,
					msecs_to_jiffies(10000));
			} while (ret == -ERESTARTSYS && retry <= 3);
		}

		if (qsc_bootup_state == QSC_BOOT_MDM_BOOTLOAER_IRQ_RECEIVED ||
		    qsc_bootup_state == QSC_BOOT_MDM_BOOTLOAER_IRQ_BOOTED) {
			retry = 0;
			while (qsc_bootup_state != QSC_BOOT_MDM_BOOTLOAER_IRQ_BOOTED &&
			       retry < QSC_POWON_MDM_HELPER_DELAY_TIMES) {
				retry++;
				msleep(QSC_POWON_MDM_HELPER_DELAY_INTERVAL_MS);
			}
			if (qsc_bootup_state == QSC_BOOT_MDM_BOOTLOAER_IRQ_RECEIVED)
				pr_err("%s: QSC boot by mdm bootloader irq took too long.\n", __func__);
			else
				pr_info("%s: QSC has been booted by mdm bootloader irq.\n", __func__);
		} else {
			qsc_bootup_state = QSC_BOOT_MDM_HELPER_BOOTING;
			pr_info("%s: Powering on qsc by WAKE_CHARM\n", __func__);
			mdm_drv->ops->power_on_mdm_cb(mdm_drv);
			qsc_bootup_state = QSC_BOOT_MDM_HELPER_BOOTED;
		}

		INIT_COMPLETION(qsc_boot_after_mdm_booloader_irq);
		break;
	case CHECK_FOR_BOOT:
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case NORMAL_BOOT_DONE:
		pr_debug("%s: check if qsc is booted up\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status) {
			pr_debug("%s: normal boot failed\n", __func__);
			mdm_drv->mdm_boot_status = -EIO;
		} else {
			pr_info("%s: normal boot done\n", __func__);
			mdm_drv->mdm_boot_status = 0;
		}
		mdm_drv->mdm_ready = 1;

		if (mdm_drv->ops->normal_boot_done_cb != NULL)
			mdm_drv->ops->normal_boot_done_cb(mdm_drv);

		if (!first_boot)
			complete(&qsc_boot);
		else
			first_boot = 0;

		if (!status && gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			schedule_delayed_work(&qsc2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case RAM_DUMP_DONE:
		pr_debug("%s: qsc done collecting RAM dumps\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status)
			mdm_drv->mdm_ram_dump_status = -EIO;
		else {
			pr_info("%s: ramdump collection completed\n", __func__);
			mdm_drv->mdm_ram_dump_status = 0;
		}
		complete(&qsc_ram_dumps);
		break;
	case WAIT_FOR_RESTART:
		pr_debug("%s: wait for qsc to need images reloaded\n",
				__func__);
		ret = wait_for_completion_interruptible(&qsc_needs_reload);
		if (!ret)
			put_user(mdm_drv->boot_type,
					 (unsigned long __user *) arg);
		INIT_COMPLETION(qsc_needs_reload);
		break;
	case FORCE_DLOAD:
		pr_info("%s: Force QSC enter DLOAD.\n", __func__);
		if (gpio_get_value(mdm_drv->mdm2ap_vddmin_gpio)) {
			pr_info("%s: HW Reset case, QSC already in Dload mode.\n", __func__);
			print_qsc_gpio();
		} else {
			gpio_direction_output(mdm_drv->ap2mdm_vddmin_gpio, 1);
			print_qsc_gpio();
			mdm_drv->ops->atomic_reset_mdm_cb(mdm_drv);
			mdelay(10);
		}
		break;
	case GET_DLOAD_STATUS:
		pr_debug("getting status of qsc2ap_errfatal_gpio\n");
		if (gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio) == 1 &&
			!mdm_drv->mdm_ready)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case IMAGE_UPGRADE:
		pr_debug("%s Image upgrade ioctl recieved\n", __func__);
		if (mdm_drv->pdata->image_upgrade_supported &&
				mdm_drv->ops->image_upgrade_cb) {
			get_user(status, (unsigned long __user *) arg);
			mdm_drv->ops->image_upgrade_cb(mdm_drv, status);
		} else
			pr_debug("%s Image upgrade not supported\n", __func__);
		break;
	case SHUTDOWN_CHARM:
		if (!mdm_drv->pdata->send_shdn)
			break;
		mdm_drv->mdm_ready = 0;
		if (qsc_debug_mask & MDM_DEBUG_MASK_SHDN_LOG)
			pr_info("Sending shutdown request to mdm\n");
		ret = sysmon_send_shutdown(SYSMON_SS_EXT_MODEM);
		if (ret)
			pr_err("%s: Graceful shutdown of the external modem failed, ret = %d\n",
				   __func__, ret);
		break;
	default:
		pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
		ret = -EINVAL;
		break;
	}

	return ret;
}
Example #4
0
static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct ts72xx_wdt *wdt = file->private_data;
	void __user *argp = (void __user *)arg;
	int __user *p = (int __user *)argp;
	int error = 0;

	if (mutex_lock_interruptible(&wdt->lock))
		return -ERESTARTSYS;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &winfo, sizeof(winfo)))
			error = -EFAULT;
		break;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		error = put_user(0, p);
		break;

	case WDIOC_KEEPALIVE:
		ts72xx_wdt_kick(wdt);
		break;

	case WDIOC_SETOPTIONS: {
		int options;

		error = get_user(options, p);
		if (error)
			break;

		error = -EINVAL;

		if ((options & WDIOS_DISABLECARD) != 0) {
			ts72xx_wdt_stop(wdt);
			error = 0;
		}
		if ((options & WDIOS_ENABLECARD) != 0) {
			ts72xx_wdt_start(wdt);
			error = 0;
		}

		break;
	}

	case WDIOC_SETTIMEOUT: {
		int new_timeout;
		int regval;

		error = get_user(new_timeout, p);
		if (error)
			break;

		regval = timeout_to_regval(new_timeout);
		if (regval < 0) {
			error = regval;
			break;
		}
		ts72xx_wdt_stop(wdt);
		wdt->regval = regval;
		ts72xx_wdt_start(wdt);

		/*FALLTHROUGH*/
	}

	case WDIOC_GETTIMEOUT:
		error = put_user(regval_to_timeout(wdt->regval), p);
		break;

	default:
		error = -ENOTTY;
		break;
	}

	mutex_unlock(&wdt->lock);
	return error;
}
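
The commands handled here follow the standard Linux watchdog ABI, so a generic user-space client works against this driver unchanged. A minimal sketch, assuming the driver shows up as the usual /dev/watchdog node:

/* Generic watchdog client sketch using the WDIOC_* ABI from
 * <linux/watchdog.h>; the timeout value is illustrative. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 30;	/* seconds; the driver may clamp or reject it */

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("effective timeout: %d s\n", timeout);

	ioctl(fd, WDIOC_KEEPALIVE, 0);	/* kick the timer */

	/* Drivers advertising WDIOF_MAGICCLOSE expect 'V' before close to
	 * disarm; otherwise closing leaves the watchdog running. */
	write(fd, "V", 1);
	close(fd);
	return 0;
}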
Example #5
0
static long snd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_set_device_msg dmsg;
	struct snd_set_volume_msg vmsg;
	struct snd_avc_ctl_msg avc_msg;
	struct snd_agc_ctl_msg agc_msg;

//LGE_SND_UPDATE_S [
	struct snd_72xx_rpc_extcmd_msg rpc_extcmd_msg;
	struct snd_audio_cal_msg cal_msg;
//LGE_SND_UPDATE_E ]

	struct msm_snd_device_config dev;
	struct msm_snd_volume_config vol;

//LGE_SND_UPDATE_S [
	struct msm_snd_72xx_rpc_extcmd_config rpc_extcmd_conf;
	struct msm_snd_audio_cal_config snd_audio_cal_conf;
//LGE_SND_UPDATE_E ]

	struct snd_ctxt *snd = file->private_data;
	int rc = 0;

	uint32_t avc, agc;

	mutex_lock(&snd->lock);
	switch (cmd) {
	case SND_SET_DEVICE:
		if (copy_from_user(&dev, (void __user *) arg, sizeof(dev))) {
			MM_ERR("set device: invalid pointer\n");
			rc = -EFAULT;
			break;
		}

		dmsg.args.device = cpu_to_be32(dev.device);
		dmsg.args.ear_mute = cpu_to_be32(dev.ear_mute);
		dmsg.args.mic_mute = cpu_to_be32(dev.mic_mute);
		if (check_mute(dev.ear_mute) < 0 ||
				check_mute(dev.mic_mute) < 0) {
			MM_ERR("set device: invalid mute status\n");
			rc = -EINVAL;
			break;
		}
		dmsg.args.cb_func = -1;
		dmsg.args.client_data = 0;

		MM_INFO("snd_set_device %d %d %d\n", dev.device,
				dev.ear_mute, dev.mic_mute);

		rc = msm_rpc_call(snd->ept,
			SND_SET_DEVICE_PROC,
			&dmsg, sizeof(dmsg), 5 * HZ);
		break;

	case SND_SET_VOLUME:
		if (copy_from_user(&vol, (void __user *) arg, sizeof(vol))) {
			MM_ERR("set volume: invalid pointer\n");
			rc = -EFAULT;
			break;
		}

		vmsg.args.device = cpu_to_be32(vol.device);
		vmsg.args.method = cpu_to_be32(vol.method);
		if (vol.method != SND_METHOD_VOICE) {
			MM_ERR("set volume: invalid method\n");
			rc = -EINVAL;
			break;
		}

		vmsg.args.volume = cpu_to_be32(vol.volume);
		vmsg.args.cb_func = -1;
		vmsg.args.client_data = 0;

		MM_INFO("snd_set_volume %d %d %d\n", vol.device,
				vol.method, vol.volume);

		rc = msm_rpc_call(snd->ept,
			SND_SET_VOLUME_PROC,
			&vmsg, sizeof(vmsg), 5 * HZ);
		break;

	case SND_AVC_CTL:
		if (get_user(avc, (uint32_t __user *) arg)) {
			rc = -EFAULT;
			break;
		} else if ((avc != 1) && (avc != 0)) {
			rc = -EINVAL;
			break;
		}

		avc_msg.args.avc_ctl = cpu_to_be32(avc);
		avc_msg.args.cb_func = -1;
		avc_msg.args.client_data = 0;

		MM_INFO("snd_avc_ctl %d\n", avc);

		rc = msm_rpc_call(snd->ept,
			SND_AVC_CTL_PROC,
			&avc_msg, sizeof(avc_msg), 5 * HZ);
		break;

	case SND_AGC_CTL:
		if (get_user(agc, (uint32_t __user *) arg)) {
			rc = -EFAULT;
			break;
		} else if ((agc != 1) && (agc != 0)) {
			rc = -EINVAL;
			break;
		}
		agc_msg.args.agc_ctl = cpu_to_be32(agc);
		agc_msg.args.cb_func = -1;
		agc_msg.args.client_data = 0;

		MM_INFO("snd_agc_ctl %d\n", agc);

		rc = msm_rpc_call(snd->ept,
			SND_AGC_CTL_PROC,
			&agc_msg, sizeof(agc_msg), 5 * HZ);
		break;

	case SND_GET_NUM_ENDPOINTS:
		if (copy_to_user((void __user *)arg,
				&snd->snd_epts->num, sizeof(unsigned))) {
			MM_ERR("get endpoint: invalid pointer\n");
			rc = -EFAULT;
		}
		break;

	case SND_GET_ENDPOINT:
		rc = get_endpoint(snd, arg);
		break;

//LGE_SND_UPDATE_S [
	case SND_72XX_RPC_EXTCMD:
		if (copy_from_user(&rpc_extcmd_conf, (void __user *) arg, sizeof(rpc_extcmd_conf))) {
			MM_ERR("SND_72XX_RPC_EXTCMD: invalid pointer\n");
			rc = -EFAULT;
			break;
		}

		rpc_extcmd_msg.args.rpc_extcmd = cpu_to_be32(rpc_extcmd_conf.rpc_extcmd);
		rpc_extcmd_msg.args.option = cpu_to_be32(rpc_extcmd_conf.option);

		rpc_extcmd_msg.args.cb_func = -1;
		rpc_extcmd_msg.args.client_data = 0;

		MM_INFO("SND_72XX_RPC_EXTCMD %d %d \n", rpc_extcmd_conf.rpc_extcmd, rpc_extcmd_conf.option);

		rc = msm_rpc_call_reply(snd->ept,
			SND_72XX_RPC_EXTCMD_PROC,
			&rpc_extcmd_msg, sizeof(rpc_extcmd_msg),
			&extcmd_msg_rep, sizeof(extcmd_msg_rep), 5 * HZ);

		if (rc < 0) {
			MM_ERR("rpc err because");
		} else {
			rpc_extcmd_conf.result = be32_to_cpu(extcmd_msg_rep.result);
			MM_INFO("snd 72xx rpc extcmd result=[%d]\n", rpc_extcmd_conf.result);
			if (copy_to_user((void __user *)arg, &rpc_extcmd_conf, sizeof(rpc_extcmd_conf))) {
				MM_INFO("snd_ioctl get voccal: invalid write pointer.\n");
				rc = -EFAULT;
			}
		}
		break;

	case SND_AUDIO_CAL:
		if (copy_from_user(&snd_audio_cal_conf, (void __user *) arg, sizeof(snd_audio_cal_conf))) {
			MM_ERR("SND_AUDIO_CAL: invalid pointer\n");
			rc = -EFAULT;
			break;
		}

		cal_msg.args.nCalType = cpu_to_be32(snd_audio_cal_conf.nCalType);
		cal_msg.args.nCmd = cpu_to_be32(snd_audio_cal_conf.nCmd);
		cal_msg.args.nDevice = cpu_to_be32(snd_audio_cal_conf.nDevice);
		cal_msg.args.nIndex = cpu_to_be32(snd_audio_cal_conf.nIndex);
		cal_msg.args.nSubIndex = cpu_to_be32(snd_audio_cal_conf.nSubIndex);
		cal_msg.args.nItem = cpu_to_be32(snd_audio_cal_conf.nItem);

		cal_msg.args.cb_func = -1;
		cal_msg.args.client_data = 0;

		MM_INFO("SND_AUDIO_CAL %d %d %d %d %d %d\n", snd_audio_cal_conf.nCalType, snd_audio_cal_conf.nCmd,
					snd_audio_cal_conf.nDevice, snd_audio_cal_conf.nIndex, snd_audio_cal_conf.nSubIndex, snd_audio_cal_conf.nItem);

		rc = msm_rpc_call_reply(snd->ept,
			SND_AUDIO_CAL_PROC,
			&cal_msg, sizeof(cal_msg), &cal_msg_rep, sizeof(cal_msg_rep), 5 * HZ);

		if (rc < 0) {
			MM_ERR("rpc err because");
		} else {
			snd_audio_cal_conf.result = be32_to_cpu(cal_msg_rep.result);
			MM_INFO("snd audio cal result=[%d]\n", snd_audio_cal_conf.result);
			if (copy_to_user((void __user *)arg, &snd_audio_cal_conf, sizeof(snd_audio_cal_conf))) {
				MM_INFO("snd_ioctl get voccal: invalid write pointer.\n");
				rc = -EFAULT;
			}
		}
		break;
//LGE_SND_UPDATE_E ]

	default:
		MM_ERR("unknown command\n");
		rc = -EINVAL;
		break;
	}
	mutex_unlock(&snd->lock);

	return rc;
}
static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_timeout;
	static const struct watchdog_info ident = {
		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
							| WDIOF_MAGICCLOSE,
		.firmware_version = 1,
		.identity = "W83697HF WDT",
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &ident, sizeof(ident)))
			return -EFAULT;
		break;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);

	case WDIOC_SETOPTIONS:
	{
		int options, retval = -EINVAL;

		if (get_user(options, p))
			return -EFAULT;

		if (options & WDIOS_DISABLECARD) {
			wdt_disable();
			retval = 0;
		}

		if (options & WDIOS_ENABLECARD) {
			wdt_enable();
			retval = 0;
		}

		return retval;
	}

	case WDIOC_KEEPALIVE:
		wdt_ping();
		break;

	case WDIOC_SETTIMEOUT:
		if (get_user(new_timeout, p))
			return -EFAULT;
		if (wdt_set_heartbeat(new_timeout))
			return -EINVAL;
		wdt_ping();
		/* Fall */

	case WDIOC_GETTIMEOUT:
		return put_user(timeout, p);

	default:
		return -ENOTTY;
	}
	return 0;
}

static int wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &wdt_is_open))
		return -EBUSY;
	/*
	 *	Activate
	 */

	wdt_enable();
	return nonseekable_open(inode, file);
}

static int wdt_close(struct inode *inode, struct file *file)
{
	if (expect_close == 42)
		wdt_disable();
	else {
		pr_crit("Unexpected close, not stopping watchdog!\n");
		wdt_ping();
	}
	expect_close = 0;
	clear_bit(0, &wdt_is_open);
	return 0;
}

/*
 *	Notifier for system down
 */

static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
	void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		wdt_disable();	/* Turn the WDT off */

	return NOTIFY_DONE;
}

/*
 *	Kernel Interfaces
 */

static const struct file_operations wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= wdt_write,
	.unlocked_ioctl	= wdt_ioctl,
	.open		= wdt_open,
	.release	= wdt_close,
};

static struct miscdevice wdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &wdt_fops,
};

/*
 *	The WDT needs to learn about soft shutdowns in order to
 *	turn the timebomb registers off.
 */

static struct notifier_block wdt_notifier = {
	.notifier_call = wdt_notify_sys,
};

static int w83697hf_check_wdt(void)
{
	if (!request_region(wdt_io, 2, WATCHDOG_NAME)) {
		pr_err("I/O address 0x%x already in use\n", wdt_io);
		return -EIO;
	}

	pr_debug("Looking for watchdog at address 0x%x\n", wdt_io);
	w83697hf_unlock();
	if (w83697hf_get_reg(0x20) == 0x60) {
		pr_info("watchdog found at address 0x%x\n", wdt_io);
		w83697hf_lock();
		return 0;
	}
	/* Reprotect in case it was a compatible device */
	w83697hf_lock();

	pr_info("watchdog not found at address 0x%x\n", wdt_io);
	release_region(wdt_io, 2);
	return -EIO;
}

static int w83697hf_ioports[] = { 0x2e, 0x4e, 0x00 };

static int __init wdt_init(void)
{
	int ret, i, found = 0;

	pr_info("WDT driver for W83697HF/HG initializing\n");

	if (wdt_io == 0) {
		/* we will autodetect the W83697HF/HG watchdog */
		for (i = 0; ((!found) && (w83697hf_ioports[i] != 0)); i++) {
			wdt_io = w83697hf_ioports[i];
			if (!w83697hf_check_wdt())
				found++;
		}
	} else {
		if (!w83697hf_check_wdt())
			found++;
	}

	if (!found) {
		pr_err("No W83697HF/HG could be found\n");
		ret = -EIO;
		goto out;
	}

	w83697hf_init();
	if (early_disable) {
		if (wdt_running())
			pr_warn("Stopping previously enabled watchdog until userland kicks in\n");
		wdt_disable();
	}

	if (wdt_set_heartbeat(timeout)) {
		wdt_set_heartbeat(WATCHDOG_TIMEOUT);
		pr_info("timeout value must be 1 <= timeout <= 255, using %d\n",
			WATCHDOG_TIMEOUT);
	}

	ret = register_reboot_notifier(&wdt_notifier);
	if (ret != 0) {
		pr_err("cannot register reboot notifier (err=%d)\n", ret);
		goto unreg_regions;
	}

	ret = misc_register(&wdt_miscdev);
	if (ret != 0) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
		       WATCHDOG_MINOR, ret);
		goto unreg_reboot;
	}

	pr_info("initialized. timeout=%d sec (nowayout=%d)\n",
		timeout, nowayout);

out:
	return ret;
unreg_reboot:
	unregister_reboot_notifier(&wdt_notifier);
unreg_regions:
	release_region(wdt_io, 2);
	goto out;
}

static void __exit wdt_exit(void)
{
	misc_deregister(&wdt_miscdev);
	unregister_reboot_notifier(&wdt_notifier);
	release_region(wdt_io, 2);
}
Example #7
0
__NOMIPS16
#endif 
static ssize_t write_chan(struct tty_struct * tty, struct file * file,
			  const unsigned char * buf, size_t nr)
{
	const unsigned char *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	ssize_t retval = 0;

	/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
	if (L_TOSTOP(tty) && 
	    file->f_dentry->d_inode->i_rdev != CONSOLE_DEV &&
	    file->f_dentry->d_inode->i_rdev != SYSCONS_DEV) {
		retval = tty_check_change(tty);
		if (retval)
			return retval;
	}

	add_wait_queue(&tty->write_wait, &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
			retval = -EIO;
			break;
		}
		if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
			while (nr > 0) {
				ssize_t num = opost_block(tty, b, nr);
				if (num < 0) {
					retval = num;
					goto break_out;
				}
				b += num;
				nr -= num;
				if (nr == 0)
					break;
				get_user(c, b);
				if (opost(c, tty) < 0)
					break;
				b++; nr--;
			}
			if (tty->driver.flush_chars)
				tty->driver.flush_chars(tty);
		} else {
			c = tty->driver.write(tty, 1, b, nr);
			if (c < 0) {
				retval = c;
				goto break_out;
			}
			b += c;
			nr -= c;
		}
		if (!nr)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}
		schedule();
	}
break_out:
	current->state = TASK_RUNNING;
	remove_wait_queue(&tty->write_wait, &wait);
	return (b - buf) ? b - buf : retval;
}
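
A short user-space sketch of what the O_NONBLOCK branch above means to a caller: when the line discipline cannot take more data, the write stops early and the process waits with poll() before retrying. The serial device path is only an example.

/* Sketch: non-blocking write to a tty with poll()-based retry. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	const char *msg = "hello over the line\r\n";
	size_t off = 0, len = strlen(msg);
	int fd = open("/dev/ttyS0", O_WRONLY | O_NONBLOCK | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while (off < len) {
		ssize_t n = write(fd, msg + off, len - off);

		if (n > 0) {
			off += n;
		} else if (n < 0 && errno == EAGAIN) {
			struct pollfd pfd = { .fd = fd, .events = POLLOUT };

			poll(&pfd, 1, 1000);	/* wait until writable */
		} else {
			perror("write");
			break;
		}
	}
	close(fd);
	return 0;
}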
Example #8
0
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;
	unsigned short rsv_window_size;

	ext4_debug ("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case EXT4_IOC_GETFLAGS:
		ext4_get_inode_flags(ei);
		flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case EXT4_IOC_SETFLAGS: {
		handle_t *handle = NULL;
		int err;
		struct ext4_iloc iloc;
		unsigned int oldflags;
		unsigned int jflag;

		if (!is_owner_or_cap(inode))
			return -EACCES;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;

		if (!S_ISDIR(inode->i_mode))
			flags &= ~EXT4_DIRSYNC_FL;

		err = -EPERM;
		mutex_lock(&inode->i_mutex);
		/* Is it quota file? Do not allow user to mess with it */
		if (IS_NOQUOTA(inode))
			goto flags_out;

		oldflags = ei->i_flags;

		/* The JOURNAL_DATA flag is modifiable only by root */
		jflag = flags & EXT4_JOURNAL_DATA_FL;

		/*
		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
		 * the relevant capability.
		 *
		 * This test looks nicer. Thanks to Pauline Middelink
		 */
		if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE))
				goto flags_out;
		}

		/*
		 * The JOURNAL_DATA flag can only be changed by
		 * the relevant capability.
		 */
		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
			if (!capable(CAP_SYS_RESOURCE))
				goto flags_out;
		}

		handle = ext4_journal_start(inode, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto flags_out;
		}
		if (IS_SYNC(inode))
			handle->h_sync = 1;
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err)
			goto flags_err;

		flags = flags & EXT4_FL_USER_MODIFIABLE;
		flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
		ei->i_flags = flags;

		ext4_set_inode_flags(inode);
		inode->i_ctime = ext4_current_time(inode);

		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
		ext4_journal_stop(handle);
		if (err)
			goto flags_out;

		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
			err = ext4_change_inode_journal_flag(inode, jflag);
flags_out:
		mutex_unlock(&inode->i_mutex);
		mnt_drop_write(filp->f_path.mnt);
		return err;
	}
	case EXT4_IOC_GETVERSION:
	case EXT4_IOC_GETVERSION_OLD:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT4_IOC_SETVERSION:
	case EXT4_IOC_SETVERSION_OLD: {
		handle_t *handle;
		struct ext4_iloc iloc;
		__u32 generation;
		int err;

		if (!is_owner_or_cap(inode))
			return -EPERM;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;
		if (get_user(generation, (int __user *) arg)) {
			err = -EFAULT;
			goto setversion_out;
		}

		handle = ext4_journal_start(inode, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto setversion_out;
		}
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err == 0) {
			inode->i_ctime = ext4_current_time(inode);
			inode->i_generation = generation;
			err = ext4_mark_iloc_dirty(handle, inode, &iloc);
		}
		ext4_journal_stop(handle);
setversion_out:
		mnt_drop_write(filp->f_path.mnt);
		return err;
	}
#ifdef CONFIG_JBD2_DEBUG
	case EXT4_IOC_WAIT_FOR_READONLY:
		/*
		 * This is racy - by the time we're woken up and running,
		 * the superblock could be released.  And the module could
		 * have been unloaded.  So sue me.
		 *
		 * Returns 1 if it slept, else zero.
		 */
		{
			struct super_block *sb = inode->i_sb;
			DECLARE_WAITQUEUE(wait, current);
			int ret = 0;

			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait);
			if (timer_pending(&EXT4_SB(sb)->turn_ro_timer)) {
				schedule();
				ret = 1;
			}
			remove_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait);
			return ret;
		}
#endif
	case EXT4_IOC_GETRSVSZ:
		if (test_opt(inode->i_sb, RESERVATION)
			&& S_ISREG(inode->i_mode)
			&& ei->i_block_alloc_info) {
			rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
			return put_user(rsv_window_size, (int __user *)arg);
		}
		return -ENOTTY;
	case EXT4_IOC_SETRSVSZ: {
		int err;

		if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
			return -ENOTTY;

		if (!is_owner_or_cap(inode))
			return -EACCES;

		if (get_user(rsv_window_size, (int __user *)arg))
			return -EFAULT;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;

		if (rsv_window_size > EXT4_MAX_RESERVE_BLOCKS)
			rsv_window_size = EXT4_MAX_RESERVE_BLOCKS;

		/*
		 * need to allocate reservation structure for this inode
		 * before set the window size
		 */
		down_write(&ei->i_data_sem);
		if (!ei->i_block_alloc_info)
			ext4_init_block_alloc_info(inode);

		if (ei->i_block_alloc_info){
			struct ext4_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
			rsv->rsv_goal_size = rsv_window_size;
		}
		up_write(&ei->i_data_sem);
		mnt_drop_write(filp->f_path.mnt);
		return 0;
	}
	case EXT4_IOC_GROUP_EXTEND: {
		ext4_fsblk_t n_blocks_count;
		struct super_block *sb = inode->i_sb;
		int err;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (get_user(n_blocks_count, (__u32 __user *)arg))
			return -EFAULT;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;

		err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		mnt_drop_write(filp->f_path.mnt);

		return err;
	}
	case EXT4_IOC_GROUP_ADD: {
		struct ext4_new_group_data input;
		struct super_block *sb = inode->i_sb;
		int err;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
				sizeof(input)))
			return -EFAULT;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;

		err = ext4_group_add(sb, &input);
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		mnt_drop_write(filp->f_path.mnt);

		return err;
	}

	case EXT4_IOC_MIGRATE:
	{
		int err;
		if (!is_owner_or_cap(inode))
			return -EACCES;

		err = mnt_want_write(filp->f_path.mnt);
		if (err)
			return err;
		/*
		 * inode_mutex prevent write and truncate on the file.
		 * Read still goes through. We take i_data_sem in
		 * ext4_ext_swap_inode_data before we switch the
		 * inode format to prevent read.
		 */
		mutex_lock(&(inode->i_mutex));
		err = ext4_ext_migrate(inode);
		mutex_unlock(&(inode->i_mutex));
		mnt_drop_write(filp->f_path.mnt);
		return err;
	}

	default:
		return -ENOTTY;
	}
}
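
EXT4_IOC_GETFLAGS and EXT4_IOC_SETFLAGS share their numbers with the generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS commands (the path chattr and lsattr use), so a plain client looks like the sketch below. The no-atime flag is just an illustrative choice; it only needs file ownership, unlike the append/immutable bits the handler guards with CAP_LINUX_IMMUTABLE.

/* Sketch: read and update inode flags through the ioctl path above.
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS and FS_NOATIME_FL come from <linux/fs.h>;
 * the handler copies an int, so an int buffer is used here. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	int flags, fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		return 1;
	}
	flags |= FS_NOATIME_FL;		/* stop updating atime for this file */
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
		perror("FS_IOC_SETFLAGS");
	close(fd);
	return 0;
}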
Example #9
0
/* execute commands to control the i/o behaviour of the SCLP tty at runtime */
static int
sclp_tty_ioctl(struct tty_struct *tty, struct file * file,
	       unsigned int cmd, unsigned long arg)
{
	unsigned long flags;
	unsigned int obuf;
	int check;
	int rc;

	if (tty->flags & (1 << TTY_IO_ERROR))
		return -EIO;
	rc = 0;
	check = 0;
	switch (cmd) {
	case TIOCSCLPSHTAB:
		/* set width of horizontal tab	*/
		if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg))
			rc = -EFAULT;
		else
			check = 1;
		break;
	case TIOCSCLPGHTAB:
		/* get width of horizontal tab	*/
		if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSECHO:
		/* enable/disable echo of input */
		if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPGECHO:
		/* Is echo of input enabled ?  */
		if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSCOLS:
		/* set number of columns for output  */
		if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg))
			rc = -EFAULT;
		else
			check = 1;
		break;
	case TIOCSCLPGCOLS:
		/* get number of columns for output  */
		if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSNL:
		/* enable/disable writing without final new line character  */
		if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPGNL:
		/* Is writing without final new line character enabled ?  */
		if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSOBUF:
		/*
		 * set the maximum buffers size for output, will be rounded
		 * up to next 4kB boundary and stored as number of SCCBs
		 * (4kB Buffers) limitation: 256 x 4kB
		 */
		if (get_user(obuf, (unsigned int __user *) arg) == 0) {
			if (obuf & 0xFFF)
				sclp_ioctls.max_sccb = (obuf >> 12) + 1;
			else
				sclp_ioctls.max_sccb = (obuf >> 12);
		} else
Example #10
0
long exynos_mem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case EXYNOS_MEM_SET_CACHEABLE:
	{
		struct exynos_mem *mem = filp->private_data;
		int cacheable;
		if (get_user(cacheable, (u32 __user *)arg)) {
			pr_err("[%s:%d] err: EXYNOS_MEM_SET_CACHEABLE\n",
				__func__, __LINE__);
			return -EFAULT;
		}
		mem->cacheable = cacheable;
		break;
	}

	case EXYNOS_MEM_PADDR_CACHE_FLUSH:
	{
		struct exynos_mem_flush_range range;
		if (copy_from_user(&range,
				   (struct exynos_mem_flush_range __user *)arg,
				   sizeof(range))) {
			pr_err("[%s:%d] err: EXYNOS_MEM_PADDR_CACHE_FLUSH\n",
				__func__, __LINE__);
			return -EFAULT;
		}

		cache_maint_phys(range.start, range.length, EM_FLUSH);
		break;
	}
	case EXYNOS_MEM_PADDR_CACHE_CLEAN:
	{
		struct exynos_mem_flush_range range;
		if (copy_from_user(&range,
				   (struct exynos_mem_flush_range __user *)arg,
				   sizeof(range))) {
			pr_err("[%s:%d] err: EXYNOS_MEM_PADDR_CACHE_FLUSH\n",
				__func__, __LINE__);
			return -EFAULT;
		}

		cache_maint_phys(range.start, range.length, EM_CLEAN);
		break;
	}
	case EXYNOS_MEM_SET_PHYADDR:
	{
		struct exynos_mem *mem = filp->private_data;
		int phyaddr;
		if (get_user(phyaddr, (u32 __user *)arg)) {
			pr_err("[%s:%d] err: EXYNOS_MEM_SET_PHYADDR\n",
				__func__, __LINE__);
			return -EFAULT;
		}
		mem->phybase = phyaddr >> PAGE_SHIFT;

		break;
	}

	default:
		pr_err("[%s:%d] error command\n", __func__, __LINE__);
		return -EINVAL;
	}

	return 0;
}
Example #11
0
static int dma_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long args)
#endif
{
    unsigned int __user *argp = (unsigned int __user *) args;
    int isParam = 0;
#if defined(LSP_210)
    int isQdma = 0;
#endif  /* defined(LSP_210) */
    int result;
    int dev_id;
    int channel;
    struct EDMA_requestDmaParams dma_req;
    struct EDMA_releaseDmaParams dma_rel;
    struct list_head *registeredlistp;
    struct list_head *u;
    struct list_head *unext;
    struct registered_user *user;

    if (_IOC_TYPE(cmd) != _IOC_TYPE(EDMA_IOCMAGIC)) {
        __E("dma_ioctl(): bad command type 0x%x (should be 0x%x)\n",
            _IOC_TYPE(cmd), _IOC_TYPE(EDMA_IOCMAGIC));
    }

    switch (cmd & EDMA_IOCCMDMASK) {

      case EDMA_IOCREQUESTDMA:
        __D("dma_ioctl(): EDMA_IOCREQUESTDMA called\n");

        if (copy_from_user(&dma_req, argp, sizeof(dma_req))) {
            return -EFAULT;
        }

        __D("dev_id: %d, eventq_no: %d, tcc: %d, param: %d, nParam: %d\n",
            dma_req.dev_id, dma_req.eventq_no, dma_req.tcc,
            dma_req.param, dma_req.nParam);

        dev_id = dma_req.dev_id;

        /*
         * In order to not be dependent on the LSP #defines, we need to
         * translate our EDMA interface's #defines to the LSP ones.
         */
        if (dev_id >= EDMA_QDMA0 && dev_id <= EDMA_QDMA7) {
#if defined(LSP_210)
            dev_id = EDMA_QDMA_CHANNEL_0 + (dev_id - EDMA_QDMA0);
            isQdma = 1;
#else   /* defined(LSP_210) */
            __E("%s: REQUESTDMA failed: QDMA is not supported\n",
                __FUNCTION__);

            return -EINVAL;
#endif  /* defined(LSP_210) */
        }
        else {
            switch (dev_id) {
              case EDMA_PARAMANY:
                dev_id = EDMA_CONT_PARAMS_ANY;
                isParam = 1;
                break;

              case EDMA_PARAMFIXEDEXACT:
                dev_id = EDMA_CONT_PARAMS_FIXED_EXACT;
                isParam = 1;
                break;

              case EDMA_PARAMFIXEDNOTEXACT:
                dev_id = EDMA_CONT_PARAMS_FIXED_NOT_EXACT;
                isParam = 1;
                break;

              case EDMA_EDMAANY:
#if defined(LSP_210)
                dev_id = EDMA_DMA_CHANNEL_ANY;
#else   /* defined(LSP_210) */
                dev_id = EDMA_CHANNEL_ANY;
#endif  /* defined(LSP_210) */
                break;

              case EDMA_QDMAANY:
#if defined(LSP_210)
                dev_id = EDMA_QDMA_CHANNEL_ANY;
                isQdma = 1;
                break;
#else   /* defined(LSP_210) */
                __E("%s: REQUESTDMA failed: QDMA is not supported\n",
                    __FUNCTION__);
                return -EINVAL;
#endif  /* defined(LSP_210) */

              default:
                /* do nothing, dev_id is an EDMA channel # */
                break;
            }
        }

#if defined(LSP_210)
        switch (dma_req.tcc) {
            case EDMA_TCCANY:
                dma_req.tcc = EDMA_TCC_ANY;
                break;

            case EDMA_TCCSYMM:
                dma_req.tcc = EDMA_TCC_SYMM;
                break;

            default:
                /* do nothing, tcc is an EDMA TCC # */
                break;
        }
#endif  /* defined(LSP_210) */

        if (isParam) {
#if defined(LSP_210)
            __D("calling davinci_request_params(%d, %d, %d)...\n", dev_id,
                dma_req.nParam, dma_req.param);
            result = davinci_request_params(dev_id, dma_req.nParam,
                                            dma_req.param);

            __D("...returned %d\n", result);

            if (result >= 0) {
                dma_req.channel = result;
                dma_req.param = result;
                /* transform to 0-based success for below common code */
                result = 0;
            }

#else   /* defined(LSP_210) */

            __D("calling edma_alloc_cont_slots(0, %d, %d, %d)...\n",
                dev_id, dma_req.param, dma_req.nParam);
            result = edma_alloc_cont_slots(0, dev_id, dma_req.param, dma_req.nParam);

            __D("...returned %d\n", result);

            if (result >= 0) {
                if (EDMA_CTLR(result) != 0) {
                    __E("%s: REQUESTDMA failed to obtain a channel from controller 0 (obtained channel %d from controller %d)\n", __FUNCTION__, EDMA_CHAN_SLOT(result), EDMA_CTLR(result));

                    release_channel(result);
                }
                else {
                    dma_req.channel = EDMA_CHAN_SLOT(result);
                    dma_req.param = dma_req.channel;
                    /* transform to 0-based success for below common code */
                    result = 0;
                }
            }
#endif  /* defined(LSP_210) */
        }
        else {

#if defined(LSP_210)
            if (dma_req.tcc == -1) {
                __E("%s: REQUESTDMA failed: TCC -1 supported only for PaRAM allocations\n", __FUNCTION__);

                return -EINVAL;
            }

            result = davinci_request_dma(dev_id, "linuxutils DMA",
                                         NULL, (void *)NULL,
                                         &dma_req.channel, &dma_req.tcc,
                                         dma_req.eventq_no);

#else   /* defined(LSP_210) */

            result = edma_alloc_channel(dev_id, NULL, NULL, dma_req.eventq_no);

            if (result >= 0) {
                if (EDMA_CTLR(result) != 0) {
                    __E("%s: REQUESTDMA failed to obtain a channel from controller 0 (obtained channel %d from controller %d, will now free it)\n", __FUNCTION__, EDMA_CHAN_SLOT(result), EDMA_CTLR(result));

                    release_channel(result);
                }
                else {
                    dma_req.channel = EDMA_CHAN_SLOT(result);
                    dma_req.tcc = dma_req.channel;
                    /* transform to 0-based success for below common code */
                    result = 0;
                }
            }
#endif  /* defined(LSP_210) */
        }

        if (result) {
            __E("%s: REQUESTDMA failed: %d\n", __FUNCTION__, result);

            return -ENOMEM;
        }
        else {
            /* For EDMA_PARAMANY we've already assigned dma_req.param above */
            if (!isParam) {
#if defined(LSP_210)
                dma_req.param = davinci_get_param(dma_req.channel);
#else   /* defined(LSP_210) */
                dma_req.param = dma_req.channel; /* one-to-one mapping */
#endif  /* defined(LSP_210) */
            }

#if defined(LSP_210)
            /* Translate LSP's QDMA #s to linuxutil's QDMA #s */
            if (isQdma) {
                dma_req.channel = (dma_req.channel - EDMA_QDMA_CHANNEL_0) +
                                  EDMA_QDMA0;
            }
#endif  /* defined(LSP_210) */

            __D("  dma channel %d allocated\n", dma_req.channel);

            __D("copying to user\n");

            if (copy_to_user(argp, &dma_req, sizeof(dma_req))) {
                return -EFAULT;
            }
        }

        user = kmalloc(sizeof(struct registered_user), GFP_KERNEL);
        if (!user) {
            __E("%s: REQUESTDMA failed to kmalloc registered_user struct",
                __FUNCTION__);

            release_channel(dma_req.channel);

            return -ENOMEM;
        }

        if (mutex_lock_interruptible(&edma_mutex)) {
            kfree(user);

            release_channel(dma_req.channel);

            return -ERESTARTSYS;
        }

        user->filp = filp;
        list_add(&user->element, &channels[dma_req.channel].users);

        if (isParam) {
            channels[dma_req.channel].nParam = dma_req.nParam;
            channels[dma_req.channel].isParam = 1;
        }
        else {
            channels[dma_req.channel].nParam = 1;
            channels[dma_req.channel].isParam = 0;
        }

        mutex_unlock(&edma_mutex);

        break;

      case EDMA_IOCREGUSER:
        __D("dma_ioctl(): EDMA_IOCREGUSER called\n");

        if (get_user(channel, argp)) {
            return -EFAULT;
        }

        __D("  channel %d\n", channel);

        if (channel >= NCHAN) {
            __E("%s: REGUSER failed: channel %d out of range\n",
                __FUNCTION__, channel);

            return -ERANGE;
        }

        registeredlistp = &channels[channel].users;
        if (registeredlistp != registeredlistp->next) {
            user = kmalloc(sizeof(struct registered_user), GFP_KERNEL);
            if (!user) {
                __E("%s: REGUSER failed to kmalloc registered_user struct",
                    __FUNCTION__);
                return -ENOMEM;
            }

            if (mutex_lock_interruptible(&edma_mutex)) {
                kfree(user);

                return -ERESTARTSYS;
            }

            user->filp = filp;
            list_add(&user->element, &channels[channel].users);

            mutex_unlock(&edma_mutex);
        }
        else {
            __E("%s: REGUSER failed: channel %d not currently allocated\n",
                __FUNCTION__, channel);

            return -EFAULT;
        }

        break;

      case EDMA_IOCRELEASEDMA:
        __D("dma_ioctl(): EDMA_IOCRELEASEDMA called\n");

        if (copy_from_user(&dma_rel, argp, sizeof(dma_rel))) {
            return -EFAULT;
        }

        __D("  channel %d\n", dma_rel.channel);

        channel = dma_rel.channel;
        if (channel >= NCHAN) {
            __E("%s: REGUSER failed: channel %d out of range\n",
                __FUNCTION__, channel);

            return -ERANGE;
        }

        if (mutex_lock_interruptible(&edma_mutex)) {
            return -ERESTARTSYS;
        }

        registeredlistp = &channels[channel].users;
        u = registeredlistp->next;
        while (u != registeredlistp) {
            unext = u->next;

            user = list_entry(u, struct registered_user, element);
            if (user->filp == filp) {
                __D("  removing registered user from channel %d list\n",
                    channel);

                list_del(u);
                kfree(user);

                /*
                 * Only remove once (we allow multiple "registers", and each
                 * one requires a corresponding "release").
                 */
                break;
            }

            u = unext;
        }

        mutex_unlock(&edma_mutex);

        if (u == registeredlistp) {
            __E("%s: RELEASEDMA failed: file %p not registered for channel %d\n",
                __FUNCTION__, filp, channel);

            return -EFAULT;
        }

        if (mutex_lock_interruptible(&edma_mutex)) {
            return -ERESTARTSYS;
        }

        if (registeredlistp->next == registeredlistp) {
            __D("  no more registered users, freeing channel %d\n", channel);

            release_channel(channel);
        }

        mutex_unlock(&edma_mutex);

        break;

      case EDMA_IOCGETVERSION:
        __D("GETVERSION ioctl received, returning %#x.\n", version);

        if (put_user(version, argp)) {
            return -EFAULT;
        }

        break;

      case EDMA_IOCGETBASEPHYSADDR:
        __D("GETBASEPHYSADDR ioctl received, returning %#x.\n", BASEADDR);

        if (put_user(BASEADDR, argp)) {
            __E("%s: GETBASEPHYSADDR: put_user() failed, returning -EFAULT!\n",
                __FUNCTION__);

            return -EFAULT;
        }

        break;
    }

    return 0;
}
Example #12
0
/* do_seen(): Checks if someone matches the mask, and returns the reply
 * mask : first parameter (e.g. "G`Quann", "G`Quann", "*!*@*.isp.de", ...)
 * nick : nick of the one, who triggered the command
 * uhost: user@host of nick
 * chan : chan, where the command was triggered
 * bns  :
 *        1 : do a botnet-seen if no matches are found
 *        0 : don't do a botnet-seen
 *       -1 : return NULL instead of text, if no matches were found
 *            (necessary for botnet seen)
 */
static char *do_seen(char *mask, char *nick, char *uhost, char *chan, int bns)
{
  char hostbuf[UHOSTLEN + 1], *host, *newhost, *tmp, *dur;
  seendat *l;
  gseenres *r;
  int wild, nr;
  char bnquery[256];
  struct userrec *u;
  struct laston_info *li;
  struct chanset_t *ch;

  Context;
  start_seentime_calc();
  if (seen_reply) {
    nfree(seen_reply);
    seen_reply = NULL;
  }
  l = NULL;
  li = NULL;
  host = hostbuf;
  newhost = NULL;
  mask = newsplit(&mask);
  glob_query = mask;
  while (mask[0] == ' ')
    mask++;
  if (!mask[0]) {
    return SLNOPARAM;
  }
  if (strchr(mask, '?') || strchr(mask, '*')) {
    // if wildcard-searches are not allowed, then either return
    // NULL (for botnet-seen), or an appropriate warning
    if (!wildcard_search) {
      if (bns == -1)
        return NULL;
      else
        return SLNOWILDCARDS;
    } else
      wild = 1;
  } else {
    if (strlen(mask) > seen_nick_len) // don't process if requested nick is too long
      return SLTOOLONGNICK;      // (e.g. stop stupid jokes)
    if (!strcasecmp(mask, nick)) {
      return SLMIRROR;
    }
    // check if the nick is on the current channel
    if (onchan(mask, chan))
      return SLONCHAN;
    if ((glob_othernick = handonchan(mask, chan)))
      return SLHANDONCHAN;
    // check if it is on any other channel
    if ((ch = onanychan(mask))) {
#if EGG_IS_MIN_VER(10500)
      if (!secretchan(ch->dname)) {
	glob_otherchan = ch->dname;
        return SLONOTHERCHAN;
      }
#else
      if (!secretchan(ch->name)) {
	glob_otherchan = ch->name;
        return SLONOTHERCHAN;
      }
#endif
    }
    // check if the user who uses this handle is on the channel under
    // a different nick
    if ((ch = handonanychan(mask))) {
#if EGG_IS_MIN_VER(10500)
      if (!secretchan(ch->dname)) {
        glob_otherchan = ch->dname;
        return SLONOTHERCHAN;
      }
#else
      if (!secretchan(ch->name)) {
        glob_otherchan = ch->name;
        return SLONOTHERCHAN;
      }
#endif
    }
    add_seenreq(mask, nick, uhost, chan, now);
    wild = 0;
    l = findseen(mask);
    // if there's a result, and if we don't want to search for the same user
    // under a different nick, just make a do_seennick on the result
    if (l && !fuzzy_search) {
      tmp = do_seennick(l);
      end_seentime_calc();
      return tmp;
    }
    if (!l) {
      u = get_user_by_handle(userlist, mask);
      if (u) {
        li = get_user(&USERENTRY_LASTON, u);
      }
      if (!u || !li) {
        if (bns == -1) {       // if bns is -1, then do_seen() was triggered by
          end_seentime_calc(); // a botnet seen function, which needs a clear
          return NULL;         // NULL to detect if there was a result or not
        }
        tmp = SLNOTSEEN;
        if (bns && ((strlen(mask) + strlen(nick) + strlen(uhost)
            + strlen(chan) + 20) < 255)) {
          debug0("trying botnet seen");
          if (bnsnick)
            nfree(bnsnick);
          if (bnschan)
            nfree(bnschan);
          bnsnick = nmalloc(strlen(nick) + 1);
          strcpy(bnsnick, nick);
          bnschan = nmalloc(strlen(chan) + 1);
          strcpy(bnschan, chan);
          sprintf(bnquery, "gseen_req %s %s %s %s", mask, nick, uhost, chan);
          botnet_send_zapf_broad(-1, botnetnick, NULL, bnquery);
        }
      } else {
        // we have a matching handle, no seen-entry, but a laston entry
        // in the userbase, so let's just return that one.
        dur = gseen_duration(now - li->laston);
        glob_laston = dur;
        tmp = SLPOORSEEN;
        seen_reply = nmalloc(strlen(tmp) + 1);
        strcpy(seen_reply, tmp);
        end_seentime_calc();
        return seen_reply;
      }
      end_seentime_calc();
      return tmp;
    }
    // now prepare the host for fuzzy-search
    if (strlen(l->host) < UHOSTLEN) {
      maskstricthost(l->host, host);
      host = strchr(host, '!') + 1; // strip nick from host for faster search
    } else {
      end_seentime_calc();
      return "error, too long host";
    }
  }
  if (l && (l->type == SEEN_CHPT)) {
    tmp = do_seennick(l);
    end_seentime_calc();
    return tmp;
  }
  numresults = 0;
  // wildmatch_seens uses a global var to store hosts in it
  // (to prevent massive nmalloc/nfree-usage), so don't forget
  // to initialize and free it
  temp_wildmatch_host = my_malloc(1);
  wildmatch_seens(host, mask, wild);
  my_free(temp_wildmatch_host);
  temp_wildmatch_host = NULL;
  if (!results) {
    end_seentime_calc();
    if (bns == -1)
      return NULL; // let the botnet seen function know, that seen failed
    return SLNOMATCH;
  }
  if (numresults >= max_matches) {
    end_seentime_calc();
    free_seenresults();
    return SLTOOMANYMATCHES;
  }
  sortresults();
  if (strcasecmp(results->seen->nick, mask)) {
    // if the user's latest nick is not the nick for which we were searching,
    // say that there were multiple matches and display the latest one
    if (numresults == 1)
      tmp = SLONEMATCH;
    else if (numresults <= 20)
      tmp = SLLITTLEMATCHES;
    else
      tmp = SLMANYMATCHES;
    seen_reply = nmalloc(strlen(tmp) + 1);
    strcpy(seen_reply, tmp);
    nr = 0;
    for (r = results; (r && (nr < 20)); r = r->next) {
      nr++;
      if (nr > 1) {
        seen_reply = nrealloc(seen_reply, 1 + strlen(seen_reply) + 1 + strlen(r->seen->nick) + 1);
        strcat(seen_reply, ", ");
      } else {
	seen_reply = nrealloc(seen_reply, 1 + strlen(seen_reply) + strlen(r->seen->nick) + 1);
        strcat(seen_reply, " ");
      }
      strcat(seen_reply, r->seen->nick);
    }
    tmp = do_seennick(results->seen);
    seen_reply = nrealloc(seen_reply, 2 + strlen(seen_reply) + strlen(tmp) + 1);
    strcat(seen_reply, ". ");   /* append; sprintf() onto itself would be UB */
    strcat(seen_reply, tmp);
  } else { // first result is the nick which we were searching for
    // just return the info for this nick and don't care about other results
    tmp = do_seennick(results->seen);
    seen_reply = nmalloc(strlen(tmp) + 1);
    strcpy(seen_reply, tmp);
  }
  free_seenresults();
  end_seentime_calc();
  return seen_reply;
}
Example #13
0
int em8300_control_ioctl(struct em8300_s *em, int cmd, unsigned long arg)
{
	em8300_register_t reg;
	int val, len;
	em8300_bcs_t bcs;
	em8300_overlay_window_t ov_win;
	em8300_overlay_screen_t ov_scr;
	em8300_overlay_calibrate_t ov_cal;
	em8300_attribute_t attr;
	int old_count;
	long ret;

	if (_IOC_DIR(cmd) != 0) {
		len = _IOC_SIZE(cmd);

		if (len < 1 || len > 65536 || arg == 0) {
			return -EFAULT;
		}
		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			if (!access_ok(VERIFY_READ, (void *) arg, len)) {
				return -EFAULT;
			}
		}
		if (_IOC_DIR(cmd) & _IOC_READ) {
			if (!access_ok(VERIFY_WRITE, (void *) arg, len)) {
				return -EFAULT;
			}
		}
	}

	switch (_IOC_NR(cmd)) {
	case _IOC_NR(EM8300_IOCTL_INIT):
		return em8300_ioctl_init(em, (em8300_microcode_t *) arg);

	case _IOC_NR(EM8300_IOCTL_WRITEREG):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (copy_from_user(&reg, (void *) arg, sizeof(em8300_register_t)))
			return -EFAULT;

		if (reg.microcode_register) {
			write_ucregister(reg.reg, reg.val);
		} else {
			write_register(reg.reg, reg.val);
		}
		break;

	case _IOC_NR(EM8300_IOCTL_READREG):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (copy_from_user(&reg, (void *) arg, sizeof(em8300_register_t)))
			return -EFAULT;

		if (reg.microcode_register) {
			reg.val = read_ucregister(reg.reg);
			reg.reg = ucregister(reg.reg);
		} else {
			reg.val = read_register(reg.reg);
		}
		if (copy_to_user((void *) arg, &reg, sizeof(em8300_register_t)))
			return -EFAULT;
		break;

	case _IOC_NR(EM8300_IOCTL_VBI):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		old_count = em->irqcount;
		em->irqmask |= IRQSTATUS_VIDEO_VBL;
		write_ucregister(Q_IrqMask, em->irqmask);

		ret = wait_event_interruptible_timeout(em->vbi_wait, em->irqcount != old_count, HZ);
		if (ret == 0)
			return -EINTR;
		else if (ret < 0)
			return ret;

		/* copy timestamp and return */
		if (copy_to_user((void *) arg, &em->tv, sizeof(struct timeval)))
			return -EFAULT;
		return 0;

	case _IOC_NR(EM8300_IOCTL_GETBCS):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			if (copy_from_user(&bcs, (void *) arg, sizeof(em8300_bcs_t)))
				return -EFAULT;
			em8300_dicom_setBCS(em, bcs.brightness, bcs.contrast, bcs.saturation);
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			bcs.brightness = em->dicom_brightness;
			bcs.contrast = em->dicom_contrast;
			bcs.saturation = em->dicom_saturation;
			if (copy_to_user((void *) arg, &bcs, sizeof(em8300_bcs_t)))
				return -EFAULT;
		}
		break;

	case _IOC_NR(EM8300_IOCTL_SET_VIDEOMODE):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			get_user(val, (int *) arg);
			return em8300_ioctl_setvideomode(em, val);
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			if (copy_to_user((void *) arg, &em->video_mode, sizeof(em->video_mode)))
				return -EFAULT;
		}
		break;

	case _IOC_NR(EM8300_IOCTL_SET_PLAYMODE):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			get_user(val, (int *) arg);
			return em8300_ioctl_setplaymode(em, val);
		}
		break;

	case _IOC_NR(EM8300_IOCTL_SET_ASPECTRATIO):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			get_user(val, (int *) arg);
			em8300_ioctl_setaspectratio(em, val);
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			if (copy_to_user((void *) arg, &em->aspect_ratio, sizeof(em->aspect_ratio)))
				return -EFAULT;
		}
		break;
	case _IOC_NR(EM8300_IOCTL_GET_AUDIOMODE):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			get_user(val, (int *) arg);
			em8300_ioctl_setaudiomode(em, val);
		}
		if (_IOC_DIR(cmd) & _IOC_READ) {
			em8300_ioctl_getaudiomode(em, arg);
		}
		break;
	case _IOC_NR(EM8300_IOCTL_SET_SPUMODE):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			get_user(val, (int *) arg);
			em8300_ioctl_setspumode(em, val);
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			if (copy_to_user((void *) arg, &em->sp_mode, sizeof(em->sp_mode)))
				return -EFAULT;
		}
		break;

	case _IOC_NR(EM8300_IOCTL_OVERLAY_SETMODE):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			get_user(val, (int *) arg);
			if (!em8300_ioctl_overlay_setmode(em, val)) {
				return -EINVAL;
			}
		}
		break;

	case _IOC_NR(EM8300_IOCTL_OVERLAY_SIGNALMODE):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			get_user(val, (int *) arg);
			if (!em9010_overlay_set_signalmode(em, val)) {
				return -EINVAL;
			}
		}
		break;

	case _IOC_NR(EM8300_IOCTL_OVERLAY_SETWINDOW):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			if (copy_from_user(&ov_win, (void *) arg, sizeof(em8300_overlay_window_t)))
				return -EFAULT;
			if (!em8300_ioctl_overlay_setwindow(em, &ov_win)) {
				return -EINVAL;
			}
		}
		if (_IOC_DIR(cmd) & _IOC_READ) {
			if (copy_to_user((void *) arg, &ov_win, sizeof(em8300_overlay_window_t)))
				return -EFAULT;
		}
		break;

	case _IOC_NR(EM8300_IOCTL_OVERLAY_SETSCREEN):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			if (copy_from_user(&ov_scr, (void *) arg, sizeof(em8300_overlay_screen_t)))
				return -EFAULT;
			if (!em8300_ioctl_overlay_setscreen(em, &ov_scr)) {
				return -EINVAL;
			}
		}
		if (_IOC_DIR(cmd) & _IOC_READ) {
			if (copy_to_user((void *) arg, &ov_scr, sizeof(em8300_overlay_screen_t)))
				return -EFAULT;
		}
	break;

	case _IOC_NR(EM8300_IOCTL_OVERLAY_CALIBRATE):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			if (copy_from_user(&ov_cal, (void *) arg, sizeof(em8300_overlay_calibrate_t)))
				return -EFAULT;
			if(!em8300_ioctl_overlay_calibrate(em, &ov_cal)) {
				return -EIO;
			}
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			if (copy_to_user((void *) arg, &ov_cal, sizeof(em8300_overlay_calibrate_t)))
				return -EFAULT;
		}
	break;

	case _IOC_NR(EM8300_IOCTL_OVERLAY_GET_ATTRIBUTE):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (copy_from_user(&attr, (void *) arg, sizeof(em8300_attribute_t)))
			return -EFAULT;
		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			em9010_set_attribute(em, attr.attribute, attr.value);
		}
		if (_IOC_DIR(cmd) & _IOC_READ) {
			attr.value = em9010_get_attribute(em, attr.attribute);
			if (copy_to_user((void *) arg, &attr, sizeof(em8300_attribute_t)))
				return -EFAULT;
		}
		break;

	case _IOC_NR(EM8300_IOCTL_SCR_GET):
		em8300_require_ucode(em);

		if (!em->ucodeloaded) {
			return -ENOTTY;
		}

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			unsigned scr;
			if (get_user(val, (unsigned *) arg))
				return -EFAULT;
			scr = read_ucregister(MV_SCRlo) | (read_ucregister(MV_SCRhi) << 16);

			if (scr > val)
				scr = scr - val;
			else
				scr = val - scr;

			if (scr > 2 * 1800) { /* Tolerance: 2 frames */
				pr_info("em8300-%d: adjusting scr: %i\n", em->card_nr, val);
				write_ucregister(MV_SCRlo, val & 0xffff);
				write_ucregister(MV_SCRhi, (val >> 16) & 0xffff);
			}
		}
Example #14
0
static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int status;
	int new_options, retval = -EINVAL;
	int new_timeout;
	union {
		struct watchdog_info __user *ident;
		int __user *i;
	} uarg;

	uarg.i = (int __user *)arg;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(uarg.ident, &ident,
						sizeof(ident)) ? -EFAULT : 0;

	case WDIOC_GETSTATUS:
		wdt_get_status(&status);
		return put_user(status, uarg.i);

	case WDIOC_GETBOOTSTATUS:
		return put_user(0, uarg.i);

	case WDIOC_SETOPTIONS:
		if (get_user(new_options, uarg.i))
			return -EFAULT;

		if (new_options & WDIOS_DISABLECARD) {
			wdt_stop();
			retval = 0;
		}

		if (new_options & WDIOS_ENABLECARD) {
			wdt_start();
			retval = 0;
		}

		return retval;

	case WDIOC_KEEPALIVE:
		wdt_keepalive();
		return 0;

	case WDIOC_SETTIMEOUT:
		if (get_user(new_timeout, uarg.i))
			return -EFAULT;

		if (wdt_set_timeout(new_timeout))
			return -EINVAL;

		wdt_keepalive();
		/* Fall through to WDIOC_GETTIMEOUT to report the new timeout */

	case WDIOC_GETTIMEOUT:
		return put_user(timeout, uarg.i);

	default:
		return -ENOTTY;

	}
}
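For orientation, here is a minimal user-space sketch of how a watchdog device exposing this ioctl set is typically driven. It assumes a /dev/watchdog node and the generic WDIOC_* codes from <linux/watchdog.h>; error handling is abbreviated.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_RDWR);
	int timeout = 30;

	if (fd < 0)
		return 1;

	/* WDIOC_SETTIMEOUT reads the requested value; because the handler
	 * above falls through to WDIOC_GETTIMEOUT, the timeout actually in
	 * effect is reported back in the same integer. */
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("timeout is now %d s\n", timeout);

	/* Ping the card so it does not expire while we are running. */
	ioctl(fd, WDIOC_KEEPALIVE, 0);

	/* Conventional "magic close": drivers that advertise
	 * WDIOF_MAGICCLOSE disarm when 'V' is written before close(). */
	write(fd, "V", 1);
	close(fd);
	return 0;
}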
Example #15
0
long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpi_ioctl_linux __user *phpi_ioctl_data;
	void __user *puhm;
	void __user *puhr;
	union hpi_message_buffer_v1 *hm;
	union hpi_response_buffer_v1 *hr;
	u16 res_max_size;
	u32 uncopied_bytes;
	struct hpi_adapter *pa = NULL;
	int err = 0;

	if (cmd != HPI_IOCTL_LINUX)
		return -EINVAL;

	hm = kmalloc(sizeof(*hm), GFP_KERNEL);
	hr = kmalloc(sizeof(*hr), GFP_KERNEL);
	if (!hm || !hr) {
		err = -ENOMEM;
		goto out;
	}

	phpi_ioctl_data = (struct hpi_ioctl_linux __user *)arg;

	/* Read the message and response pointers from user space.  */
	if (get_user(puhm, &phpi_ioctl_data->phm) ||
	    get_user(puhr, &phpi_ioctl_data->phr)) {
		err = -EFAULT;
		goto out;
	}

	/* Now read the message size and data from user space.  */
	if (get_user(hm->h.size, (u16 __user *)puhm)) {
		err = -EFAULT;
		goto out;
	}
	if (hm->h.size > sizeof(*hm))
		hm->h.size = sizeof(*hm);

	/*printk(KERN_INFO "message size %d\n", hm->h.wSize); */

	uncopied_bytes = copy_from_user(hm, puhm, hm->h.size);
	if (uncopied_bytes) {
		HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
		err = -EFAULT;
		goto out;
	}

	if (get_user(res_max_size, (u16 __user *)puhr)) {
		err = -EFAULT;
		goto out;
	}
	/* printk(KERN_INFO "user response size %d\n", res_max_size); */
	if (res_max_size < sizeof(struct hpi_response_header)) {
		HPI_DEBUG_LOG(WARNING, "small res size %d\n", res_max_size);
		err = -EFAULT;
		goto out;
	}

	pa = &adapters[hm->h.adapter_index];
	hr->h.size = 0;
	if (hm->h.object == HPI_OBJ_SUBSYSTEM) {
		switch (hm->h.function) {
		case HPI_SUBSYS_CREATE_ADAPTER:
		case HPI_SUBSYS_DELETE_ADAPTER:
			/* Application must not use these functions! */
			hr->h.size = sizeof(hr->h);
			hr->h.error = HPI_ERROR_INVALID_OPERATION;
			hr->h.function = hm->h.function;
			uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
			if (uncopied_bytes)
				err = -EFAULT;
			else
				err = 0;
			goto out;

		default:
			hpi_send_recv_f(&hm->m0, &hr->r0, file);
		}
	} else {
		u16 __user *ptr = NULL;
		u32 size = 0;

		/* -1=no data 0=read from user mem, 1=write to user mem */
		int wrflag = -1;
		u32 adapter = hm->h.adapter_index;

		if ((hm->h.adapter_index >= HPI_MAX_ADAPTERS) || (!pa->type)) {
			hpi_init_response(&hr->r0, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN,
				HPI_ERROR_BAD_ADAPTER_NUMBER);

			uncopied_bytes =
				copy_to_user(puhr, hr, sizeof(hr->h));
			if (uncopied_bytes)
				err = -EFAULT;
			else
				err = 0;
			goto out;
		}

		if (mutex_lock_interruptible(&adapters[adapter].mutex)) {
			err = -EINTR;
			goto out;
		}

		/* Dig out any pointers embedded in the message.  */
		switch (hm->h.function) {
		case HPI_OSTREAM_WRITE:
		case HPI_ISTREAM_READ:{
				/* Yes, sparse, this is correct. */
				ptr = (u16 __user *)hm->m0.u.d.u.data.pb_data;
				size = hm->m0.u.d.u.data.data_size;

				/* Allocate buffer according to application request.
				   ?Is it better to alloc/free for the duration
				   of the transaction?
				 */
				if (pa->buffer_size < size) {
					HPI_DEBUG_LOG(DEBUG,
						"realloc adapter %d stream "
						"buffer from %zd to %d\n",
						hm->h.adapter_index,
						pa->buffer_size, size);
					if (pa->p_buffer) {
						pa->buffer_size = 0;
						vfree(pa->p_buffer);
					}
					pa->p_buffer = vmalloc(size);
					if (pa->p_buffer)
						pa->buffer_size = size;
					else {
						HPI_DEBUG_LOG(ERROR,
							"HPI could not allocate "
							"stream buffer size %d\n",
							size);

						mutex_unlock(&adapters
							[adapter].mutex);
						err = -EINVAL;
						goto out;
					}
				}

				hm->m0.u.d.u.data.pb_data = pa->p_buffer;
				if (hm->h.function == HPI_ISTREAM_READ)
					/* from card, WRITE to user mem */
					wrflag = 1;
				else
					wrflag = 0;
				break;
			}

		default:
			size = 0;
			break;
		}

		if (size && (wrflag == 0)) {
			uncopied_bytes =
				copy_from_user(pa->p_buffer, ptr, size);
			if (uncopied_bytes)
				HPI_DEBUG_LOG(WARNING,
					"missed %d of %d "
					"bytes from user\n", uncopied_bytes,
					size);
		}

		hpi_send_recv_f(&hm->m0, &hr->r0, file);

		if (size && (wrflag == 1)) {
			uncopied_bytes =
				copy_to_user(ptr, pa->p_buffer, size);
			if (uncopied_bytes)
				HPI_DEBUG_LOG(WARNING,
					"missed %d of %d " "bytes to user\n",
					uncopied_bytes, size);
		}

		mutex_unlock(&adapters[adapter].mutex);
	}

	/* on return response size must be set */
	/*printk(KERN_INFO "response size %d\n", hr->h.wSize); */

	if (!hr->h.size) {
		HPI_DEBUG_LOG(ERROR, "response zero size\n");
		err = -EFAULT;
		goto out;
	}

	if (hr->h.size > res_max_size) {
		HPI_DEBUG_LOG(ERROR, "response too big %d %d\n", hr->h.size,
			res_max_size);
		/*HPI_DEBUG_MESSAGE(ERROR, hm); */
		err = -EFAULT;
		goto out;
	}

	uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
	if (uncopied_bytes) {
		HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
		err = -EFAULT;
		goto out;
	}

out:
	kfree(hm);
	kfree(hr);
	return err;
}
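A pattern worth isolating from the handler above: the user-declared size is read first, sanity-checked, and clamped before any bulk copy_from_user(). Below is a stand-alone kernel-side sketch of that idea; struct var_msg and copy_var_msg are hypothetical names used for illustration only, not part of the HPI API (assumes <linux/types.h> and <linux/uaccess.h>).

struct var_msg {
	u16 size;		/* total length declared by user space */
	u8 payload[256];
};

static int copy_var_msg(struct var_msg *dst, void __user *src)
{
	u16 size;

	/* Read only the length field first. */
	if (get_user(size, (u16 __user *)src))
		return -EFAULT;
	if (size < sizeof(size))	/* must at least cover the header */
		return -EINVAL;
	if (size > sizeof(*dst))	/* never trust the declared length */
		size = sizeof(*dst);
	/* Copy at most what the kernel-side buffer can hold. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}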
Example #16
0
static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
	struct cg14_par *par = (struct cg14_par *) info->par;
	struct cg14_regs __iomem *regs = par->regs;
	struct mdi_cfginfo kmdi, __user *mdii;
	unsigned long flags;
	int cur_mode, mode, ret = 0;

	switch (cmd) {
	case MDI_RESET:
		spin_lock_irqsave(&par->lock, flags);
		__cg14_reset(par);
		spin_unlock_irqrestore(&par->lock, flags);
		break;

	case MDI_GET_CFGINFO:
		memset(&kmdi, 0, sizeof(kmdi));

		spin_lock_irqsave(&par->lock, flags);
		kmdi.mdi_type = FBTYPE_MDICOLOR;
		kmdi.mdi_height = info->var.yres;
		kmdi.mdi_width = info->var.xres;
		kmdi.mdi_mode = par->mode;
		kmdi.mdi_pixfreq = 72; /* FIXME */
		kmdi.mdi_size = par->ramsize;
		spin_unlock_irqrestore(&par->lock, flags);

		mdii = (struct mdi_cfginfo __user *) arg;
		if (copy_to_user(mdii, &kmdi, sizeof(kmdi)))
			ret = -EFAULT;
		break;

	case MDI_SET_PIXELMODE:
		if (get_user(mode, (int __user *) arg)) {
			ret = -EFAULT;
			break;
		}

		spin_lock_irqsave(&par->lock, flags);
		cur_mode = sbus_readb(&regs->mcr);
		cur_mode &= ~CG14_MCR_PIXMODE_MASK;
		switch (mode) {
		case MDI_32_PIX:
			cur_mode |= (CG14_MCR_PIXMODE_32 <<
				     CG14_MCR_PIXMODE_SHIFT);
			break;

		case MDI_16_PIX:
			cur_mode |= (CG14_MCR_PIXMODE_16 <<
				     CG14_MCR_PIXMODE_SHIFT);
			break;

		case MDI_8_PIX:
			break;

		default:
			ret = -ENOSYS;
			break;
		}
		if (!ret) {
			sbus_writeb(cur_mode, &regs->mcr);
			par->mode = mode;
		}
		spin_unlock_irqrestore(&par->lock, flags);
		break;

	default:
		ret = sbusfb_ioctl_helper(cmd, arg, info,
					  FBTYPE_MDICOLOR, 8,
					  info->fix.smem_len);
		break;
	}

	return ret;
}
Example #17
0
/* ioctl */
static long intel_scu_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	u32 __user *p = argp;
	u32 val;
	int options;

	static const struct watchdog_info ident = {
		.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
		/* @todo Get from SCU via ipc_get_scu_fw_version()? */
		.firmware_version = 0,
		/* len < 32 */
		.identity = "Intel_SCU IOH Watchdog"
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident,
				    sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_KEEPALIVE:
		pr_warn("%s: KeepAlive ioctl\n", __func__);
		if (!watchdog_device.started)
			return -EINVAL;

		watchdog_keepalive();
		return 0;
	case WDIOC_SETPRETIMEOUT:
		pr_warn("%s: SetPreTimeout ioctl is deprecated\n", __func__);

		if (watchdog_device.started)
			return -EBUSY;

		/* Timeout to warn */
		if (get_user(val, p))
			return -EFAULT;

		pre_timeout = val;
		return 0;
	case WDIOC_SETTIMEOUT:
		pr_warn("%s: SetTimeout ioctl\n", __func__);

		if (get_user(val, p))
			return -EFAULT;

		timeout = val;

		if (check_timeouts(pre_timeout, timeout)) {
			pr_warn("%s: Invalid thresholds\n",
				__func__);
			return -EINVAL;
		}
		if (watchdog_config_and_start(timeout, pre_timeout))
			return -EINVAL;

		return 0;
	case WDIOC_GETTIMEOUT:
		return put_user(timeout, p);
	case WDIOC_SETOPTIONS:
		if (get_user(options, p))
			return -EFAULT;

		if (options & WDIOS_DISABLECARD) {
			pr_warn("%s: Stopping the watchdog\n", __func__);
			watchdog_stop();
			return 0;
		}

		if (options & WDIOS_ENABLECARD) {
			pr_warn("%s: Starting the watchdog\n", __func__);

			if (watchdog_device.started)
				return -EBUSY;

			if (check_timeouts(pre_timeout, timeout)) {
				pr_warn("%s: Invalid thresholds\n",
					__func__);
				return -EINVAL;
			}
			if (watchdog_config_and_start(timeout, pre_timeout))
				return -EINVAL;
			return 0;
		}
		return 0;
	default:
		return -ENOTTY;
	}
}

static int watchdog_set_reset_type(int reset_type)
{
	int ret;
	struct ipc_wd_on_timeout {
		u32 reset_type;
	} ipc_wd_on_timeout = { reset_type };

	ret = rpmsg_send_command(watchdog_instance, IPC_WATCHDOG,
				 SCU_WATCHDOG_SET_ACTION_ON_TIMEOUT,
				 (u8 *)&ipc_wd_on_timeout, NULL,
				 sizeof(ipc_wd_on_timeout), 0);
	if (ret) {
		pr_crit("Error setting watchdog action: %d\n", ret);
		return -EIO;
	}

	watchdog_device.normal_wd_action = reset_type;

	return 0;
}
Example #18
0
/*
 * Handle {get,set,swap}_context operations
 */
int sys_swapcontext(struct ucontext __user *old_ctx,
		    struct ucontext __user *new_ctx,
		    long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
{
	unsigned char tmp;
	sigset_t set;
	unsigned long new_msr = 0;
	int ctx_has_vsx_region = 0;

	BUG_ON(regs != current->thread.regs);

	if (new_ctx &&
	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
		return -EFAULT;
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context sets the MSR VSX bits but is too small
	 * to actually carry VSX state, reject it.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;

	if (old_ctx != NULL) {
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0,
					ctx_has_vsx_region)
		    || __copy_to_user(&old_ctx->uc_sigmask,
				      &current->blocked, sizeof(sigset_t)))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */

	if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
		do_exit(SIGSEGV);
	set_current_blocked(&set);
	if (restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext))
		do_exit(SIGSEGV);

	/* This returns like rt_sigreturn */
	set_thread_flag(TIF_RESTOREALL);
	return 0;
}
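This is the kernel entry point behind the {get,set,swap}_context operations mentioned in the comment above. For orientation, here is a user-space sketch of the portable <ucontext.h> interface that expresses the same switch; it uses only standard POSIX calls and does not depend on how the C library maps them onto this particular syscall.

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, work_ctx;
static char work_stack[64 * 1024];

static void worker(void)
{
	puts("in worker context");
	/* Returning resumes uc_link, i.e. main_ctx. */
}

int main(void)
{
	getcontext(&work_ctx);
	work_ctx.uc_stack.ss_sp = work_stack;
	work_ctx.uc_stack.ss_size = sizeof(work_stack);
	work_ctx.uc_link = &main_ctx;
	makecontext(&work_ctx, worker, 0);

	/* Save the current context into main_ctx and switch to work_ctx;
	 * control returns here when worker() returns. */
	swapcontext(&main_ctx, &work_ctx);
	puts("back in main context");
	return 0;
}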
Example #19
0
int scull_ioctl(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{

    int err = 0, tmp;
    
    /*
     * extract the type and number bitfields, and don't decode
     * wrong cmds: return ENOTTY before verify_area()
     */
    if (_IOC_TYPE(cmd) != SCULL_IOC_MAGIC) return -ENOTTY;
    if (_IOC_NR(cmd) > SCULL_IOC_MAXNR) return -ENOTTY;

    /*
     * the direction is a bitmask, and VERIFY_WRITE catches R/W
     * transfers. `Type' is user-oriented, while
     * verify_area is kernel-oriented, so the concept of "read" and
     * "write" is reversed
     */
    if (_IOC_DIR(cmd) & _IOC_READ)
        err = verify_area(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
    else if (_IOC_DIR(cmd) & _IOC_WRITE)
        err =  verify_area(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
    if (err) return err;

    switch(cmd) {

      case SCULL_IOCRESET:
        scull_quantum = SCULL_QUANTUM;
        scull_qset = SCULL_QSET;
        break;
        
      case SCULL_IOCSQUANTUM: /* Set: arg points to the value */
        scull_quantum = get_user((int *)arg);
        break;

      case SCULL_IOCTQUANTUM: /* Tell: arg is the value */
        scull_quantum = arg;
        break;

      case SCULL_IOCGQUANTUM: /* Get: arg is pointer to result */
        put_user(scull_quantum, (int *)arg);
        break;

      case SCULL_IOCQQUANTUM: /* Query: return it (it's positive) */
        return scull_quantum;

      case SCULL_IOCXQUANTUM: /* eXchange: use arg as pointer */
        tmp = scull_quantum;
        scull_quantum = get_user((int *)arg);
        put_user(tmp, (int *)arg);
        break;

      case SCULL_IOCHQUANTUM: /* sHift: like Tell + Query */
        tmp = scull_quantum;
        scull_quantum = arg;
        return tmp;
        
      case SCULL_IOCSQSET:
        scull_qset = get_user((int *)arg);
        break;

      case SCULL_IOCTQSET:
        scull_qset = arg;
        break;

      case SCULL_IOCGQSET:
        put_user(scull_qset, (int *)arg);
        break;

      case SCULL_IOCQQSET:
        return scull_qset;

      case SCULL_IOCXQSET:
        tmp = scull_qset;
        scull_qset = get_user((int *)arg);
        put_user(tmp, (int *)arg);
        break;

      case SCULL_IOCHQSET:
        tmp = scull_qset;
        scull_qset = arg;
        return tmp;

        /*
         * The following two change the buffer size for scullpipe.
         * The scullpipe device uses this same ioctl method, just to
         * write less code. Actually, it's the same driver, isn't it?
         */

      case SCULL_P_IOCTSIZE:
        scull_p_buffer = arg;
        break;

      case SCULL_P_IOCQSIZE:
        return scull_p_buffer;


      default:  /* redundant, as cmd was checked against MAXNR */
        return -ENOTTY;
    }
    return 0;

}
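The scull example above is written against the old 2.0-era interface, where get_user() took a single pointer and returned the fetched value, and verify_area() did the access checking. Here is a sketch of the quantum set/get pair against the current API, where get_user()/put_user() take two arguments and return 0 on success, reusing the SCULL_IOC* codes and scull_quantum from the example (assumes <linux/fs.h> and <linux/uaccess.h>).

static long scull_ioctl_modern(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case SCULL_IOCSQUANTUM:		/* Set: arg points to the value */
		if (get_user(scull_quantum, p))
			return -EFAULT;
		return 0;
	case SCULL_IOCGQUANTUM:		/* Get: arg is a pointer to the result */
		if (put_user(scull_quantum, p))
			return -EFAULT;
		return 0;
	default:
		return -ENOTTY;
	}
}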