Example #1
0
static ssize_t reg_read(struct file *file, const char __user *ubuf,
						size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct msm_fb_data_type *mfd = s->private;
	struct mdss_panel_data *pdata;
	struct mdss_mdp_ctl *ctl;
	u8 params[3]; /* No more than reg + two parameters is allowed */
	char *buf;
	const char *p;
	int ret;
	int nbr_bytes_to_read;
	int i;
	int j;
	enum dbg_cmd_type cmd;
	struct dsi_cmd_desc dsi;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (!mfd->panel_power_on) {
		pr_err("%s: panel is NOT on\n", __func__);
		goto exit;
	}

	ctl = mfd_to_ctl(mfd);
	if (!ctl)
		goto exit;

	if (mutex_lock_interruptible(&ctl->lock))
		goto exit;

	pdata = dev_get_platdata(&mfd->pdev->dev);
	if (!pdata) {
		pr_err("no panel connected\n");
		goto unlock_exit;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);
	if (!ctrl_pdata) {
		pr_err("%s: Invalid input data\n", __func__);
		goto unlock_exit;
	}

	pr_err("%s\n", __func__);

	ret = setup_reg_access(&buf, ubuf, count);
	if (ret)
		goto unlock_exit;

	ret = get_cmd_type(buf, &cmd);
	if (ret) {
		update_res_buf("Read - unknown type\n");
		goto fail_free_all;
	}

	p = buf;
	p = p+4;

	/* Get nbr_bytes_to_read */
	if (sscanf(p, "%d", &nbr_bytes_to_read) != 1) {
		update_res_buf("Read - parameter error\n");
		ret = -EINVAL;
		goto fail_free_all;
	}

	while (isxdigit(*p) || (*p == 'x'))
		p++;

	pr_err("nbr_bytes_to_read = %d\n", nbr_bytes_to_read);
	i = 0;

	ret = get_parameters(p, params, ARRAY_SIZE(params), &i);
	if (ret)
		goto fail_free_all;

	ret = prepare_for_reg_access(mfd);
	if (ret)
		goto fail_free_all;

	if (cmd == DCS) {
		dsi.dchdr.dtype = DTYPE_DCS_READ;
	} else {
		if (i == 1) { /* 0 parameters */
			dsi.dchdr.dtype = DTYPE_GEN_READ;
		} else if (i == 2) { /* 1 parameter */
			dsi.dchdr.dtype = DTYPE_GEN_READ1;
		} else { /* 2 parameters */
			dsi.dchdr.dtype = DTYPE_GEN_READ2;
		}
	}
	dsi.dchdr.last = 1;
	dsi.dchdr.vc = 0;
	dsi.dchdr.ack = 1;
	dsi.dchdr.wait = 5;
	dsi.dchdr.dlen = i;
	dsi.payload = params;

	pr_err("dtype=%d, last=%d, vc=%d, ack=%d, wait=%d, dlen=%d\n",
		dsi.dchdr.dtype, dsi.dchdr.last, dsi.dchdr.vc, dsi.dchdr.ack,
		dsi.dchdr.wait, dsi.dchdr.dlen);
	for (j = 0; j < i; j++)
		pr_err("payload[%d] = 0x%x\n", j, dsi.payload[j]);

	mdss_dsi_cmds_rx(ctrl_pdata, &dsi, nbr_bytes_to_read);

	ret = post_reg_access(mfd);
	if (ret)
		goto fail_free_all;

	print_params(dsi.dchdr.dtype, params[0], ctrl_pdata->rx_buf.len,
			ctrl_pdata->rx_buf.data);

fail_free_all:
	kfree(buf);
unlock_exit:
	mutex_unlock(&ctl->lock);
exit:
	return count;
}
Example #2
0
static ssize_t wdm_write
(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	u8 *buf;
	int rv = -EMSGSIZE, r, we;
	struct wdm_device *desc = file->private_data;
	struct usb_ctrlrequest *req;

	if (count > desc->wMaxCommand)
		count = desc->wMaxCommand;

	spin_lock_irq(&desc->iuspin);
	we = desc->werr;
	desc->werr = 0;
	spin_unlock_irq(&desc->iuspin);
	if (we < 0)
		return -EIO;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf) {
		rv = -ENOMEM;
		goto outnl;
	}

	r = copy_from_user(buf, buffer, count);
	if (r > 0) {
		kfree(buf);
		rv = -EFAULT;
		goto outnl;
	}

	/* concurrent writes and disconnect */
	r = mutex_lock_interruptible(&desc->wlock);
	rv = -ERESTARTSYS;
	if (r) {
		kfree(buf);
		goto outnl;
	}

	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
		kfree(buf);
		rv = -ENODEV;
		goto outnp;
	}

	r = usb_autopm_get_interface(desc->intf);
	if (r < 0) {
		kfree(buf);
		goto outnp;
	}

	if (!(file->f_flags & O_NONBLOCK))
		r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
								&desc->flags));
	else
		if (test_bit(WDM_IN_USE, &desc->flags))
			r = -EAGAIN;
	if (r < 0) {
		kfree(buf);
		goto out;
	}

	req = desc->orq;
	usb_fill_control_urb(
		desc->command,
		interface_to_usbdev(desc->intf),
		/* using common endpoint 0 */
		usb_sndctrlpipe(interface_to_usbdev(desc->intf), 0),
		(unsigned char *)req,
		buf,
		count,
		wdm_out_callback,
		desc
	);

	req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
			     USB_RECIP_INTERFACE);
	req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
	req->wValue = 0;
	req->wIndex = desc->inum;
	req->wLength = cpu_to_le16(count);
	set_bit(WDM_IN_USE, &desc->flags);
	desc->outbuf = buf;

	rv = usb_submit_urb(desc->command, GFP_KERNEL);
	if (rv < 0) {
		kfree(buf);
		desc->outbuf = NULL;
		clear_bit(WDM_IN_USE, &desc->flags);
		dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
	} else {
		dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
			req->wIndex);
	}
out:
	usb_autopm_put_interface(desc->intf);
outnp:
	mutex_unlock(&desc->wlock);
outnl:
	return rv < 0 ? rv : count;
}
Example #3
0
ssize_t vivid_radio_rx_read(struct file *file, char __user *buf,
			 size_t size, loff_t *offset)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct timespec ts;
	struct v4l2_rds_data *data = dev->rds_gen.data;
	bool use_alternates;
	unsigned blk;
	int perc;
	int i;

	if (dev->radio_rx_rds_controls)
		return -EINVAL;
	if (size < sizeof(*data))
		return 0;
	size = sizeof(*data) * (size / sizeof(*data));

	if (mutex_lock_interruptible(&dev->mutex))
		return -ERESTARTSYS;
	if (dev->radio_rx_rds_owner &&
	    file->private_data != dev->radio_rx_rds_owner) {
		mutex_unlock(&dev->mutex);
		return -EBUSY;
	}
	if (dev->radio_rx_rds_owner == NULL) {
		vivid_radio_rds_init(dev);
		dev->radio_rx_rds_owner = file->private_data;
	}

retry:
	ktime_get_ts(&ts);
	use_alternates = ts.tv_sec % 10 >= 5;
	if (dev->radio_rx_rds_last_block == 0 ||
	    dev->radio_rx_rds_use_alternates != use_alternates) {
		dev->radio_rx_rds_use_alternates = use_alternates;
		/* Re-init the RDS generator */
		vivid_radio_rds_init(dev);
	}
	ts = timespec_sub(ts, dev->radio_rds_init_ts);
	blk = ts.tv_sec * 100 + ts.tv_nsec / 10000000;
	blk = (blk * VIVID_RDS_GEN_BLOCKS) / 500;
	if (blk >= dev->radio_rx_rds_last_block + VIVID_RDS_GEN_BLOCKS)
		dev->radio_rx_rds_last_block = blk - VIVID_RDS_GEN_BLOCKS + 1;

	/*
	 * No data is available if there hasn't been time to get new data,
	 * or if the RDS receiver has been disabled, or if we use the data
	 * from the RDS transmitter and that RDS transmitter has been disabled,
	 * or if the signal quality is too weak.
	 */
	if (blk == dev->radio_rx_rds_last_block || !dev->radio_rx_rds_enabled ||
	    (dev->radio_rds_loop && !(dev->radio_tx_subchans & V4L2_TUNER_SUB_RDS)) ||
	    abs(dev->radio_rx_sig_qual) > 200) {
		mutex_unlock(&dev->mutex);
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;
		if (msleep_interruptible(20) && signal_pending(current))
			return -EINTR;
		if (mutex_lock_interruptible(&dev->mutex))
			return -ERESTARTSYS;
		goto retry;
	}

	/* abs(dev->radio_rx_sig_qual) <= 200, map that to a 0-50% range */
	perc = abs(dev->radio_rx_sig_qual) / 4;

	for (i = 0; i < size && blk > dev->radio_rx_rds_last_block;
			dev->radio_rx_rds_last_block++) {
		unsigned data_blk = dev->radio_rx_rds_last_block % VIVID_RDS_GEN_BLOCKS;
		struct v4l2_rds_data rds = data[data_blk];

		if (data_blk == 0 && dev->radio_rds_loop)
			vivid_radio_rds_init(dev);
		if (perc && prandom_u32_max(100) < perc) {
			switch (prandom_u32_max(4)) {
			case 0:
				rds.block |= V4L2_RDS_BLOCK_CORRECTED;
				break;
			case 1:
				rds.block |= V4L2_RDS_BLOCK_INVALID;
				break;
			case 2:
				rds.block |= V4L2_RDS_BLOCK_ERROR;
				rds.lsb = prandom_u32_max(256);
				rds.msb = prandom_u32_max(256);
				break;
			case 3: /* Skip block altogether */
				if (i)
					continue;
				/*
				 * Must make sure at least one block is
				 * returned, otherwise the application
				 * might think that end-of-file occurred.
				 */
				break;
			}
		}
		if (copy_to_user(buf + i, &rds, sizeof(rds))) {
			i = -EFAULT;
			break;
		}
		i += sizeof(rds);
	}
	mutex_unlock(&dev->mutex);
	return i;
}
Example #4
0
/* I2C */
static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
			   int num)
{
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	struct az6007_device_state *st = d_to_priv(d);
	int i, j, len;
	int ret = 0;
	u16 index;
	u16 value;
	int length;
	u8 req, addr;

	if (mutex_lock_interruptible(&st->mutex) < 0)
		return -EAGAIN;

	for (i = 0; i < num; i++) {
		addr = msgs[i].addr << 1;
		if (((i + 1) < num)
		    && (msgs[i].len == 1)
		    && ((msgs[i].flags & I2C_M_RD) != I2C_M_RD)
		    && (msgs[i + 1].flags & I2C_M_RD)
		    && (msgs[i].addr == msgs[i + 1].addr)) {
			/*
			 * A write + read xfer for the same address, where
			 * the first xfer has just 1 byte length.
			 * Need to join both into one operation
			 */
			if (az6007_xfer_debug)
				printk(KERN_DEBUG "az6007: I2C W/R addr=0x%x len=%d/%d\n",
				       addr, msgs[i].len, msgs[i + 1].len);
			req = AZ6007_I2C_RD;
			index = msgs[i].buf[0];
			value = addr | (1 << 8);
			length = 6 + msgs[i + 1].len;
			len = msgs[i + 1].len;
			ret = __az6007_read(d->udev, req, value, index,
					    st->data, length);
			if (ret >= len) {
				for (j = 0; j < len; j++)
					msgs[i + 1].buf[j] = st->data[j + 5];
			} else
				ret = -EIO;
			i++;
		} else if (!(msgs[i].flags & I2C_M_RD)) {
			/* write bytes */
			if (az6007_xfer_debug)
				printk(KERN_DEBUG "az6007: I2C W addr=0x%x len=%d\n",
				       addr, msgs[i].len);
			req = AZ6007_I2C_WR;
			index = msgs[i].buf[0];
			value = addr | (1 << 8);
			length = msgs[i].len - 1;
			len = msgs[i].len - 1;
			for (j = 0; j < len; j++)
				st->data[j] = msgs[i].buf[j + 1];
			ret =  __az6007_write(d->udev, req, value, index,
					      st->data, length);
		} else {
			/* read bytes */
			if (az6007_xfer_debug)
				printk(KERN_DEBUG "az6007: I2C R addr=0x%x len=%d\n",
				       addr, msgs[i].len);
			req = AZ6007_I2C_RD;
			index = msgs[i].buf[0];
			value = addr;
			length = msgs[i].len + 6;
			len = msgs[i].len;
			ret = __az6007_read(d->udev, req, value, index,
					    st->data, length);
			for (j = 0; j < len; j++)
				msgs[i].buf[j] = st->data[j + 5];
		}
		if (ret < 0)
			goto err;
	}
err:
	mutex_unlock(&st->mutex);

	if (ret < 0) {
		pr_info("%s ERROR: %i\n", __func__, ret);
		return ret;
	}
	return num;
}
Example #5
0
/*****************************************
 * H2C Msg format :
 * 0x1DF - 0x1D0
 * | 31 - 8  | 7 - 5    | 4 - 0  |
 * | h2c_msg | Class_ID | CMD_ID |
 *
 * Extend 0x1FF - 0x1F0
 * | 31 - 0  |
 * | ext_msg |
 ******************************************/
static void _rtl8821au_fill_h2c_cmd(struct rtl_priv *rtlpriv,
					u8 element_id, u32 cmd_len,
					u8 *cmdbuffer)
{
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	uint8_t bcmd_down = _FALSE;
	int32_t retry_cnts = 100;
	uint8_t h2c_box_num;
	uint32_t msgbox_addr;
	uint32_t msgbox_ex_addr;
	uint8_t cmd_idx, ext_cmd_len;
	uint32_t h2c_cmd = 0;
	uint32_t h2c_cmd_ex = 0;
	int _unused;

	_unused = mutex_lock_interruptible(&(rtl_usbdev(rtlpriv)->h2c_fwcmd_mutex));

	if (!cmdbuffer) {
		goto exit;
	}
	if (cmd_len > RTL8812_MAX_CMD_LEN) {
		goto exit;
	}

	if (rtlpriv->bSurpriseRemoved == _TRUE)
		goto exit;

	/* watch for a race condition while setting the H2C command */
	do {
		h2c_box_num = rtlhal->last_hmeboxnum;

		if (!_is_fw_read_cmd_down(rtlpriv, h2c_box_num)) {
			RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, " fw read cmd failed...\n");
			goto exit;
		}

		*(uint8_t *)(&h2c_cmd) = element_id;

		if (cmd_len <= 3) {
			memcpy((uint8_t *)(&h2c_cmd)+1, cmdbuffer, cmd_len);
		} else {
			memcpy((uint8_t *)(&h2c_cmd)+1, cmdbuffer, 3);
			ext_cmd_len = cmd_len-3;
			memcpy((uint8_t *)(&h2c_cmd_ex), cmdbuffer+3, ext_cmd_len);

			/* Write Ext command */
			msgbox_ex_addr = REG_HMEBOX_EXT0_8812 + (h2c_box_num * RTL8812_EX_MESSAGE_BOX_SIZE);
#ifdef CONFIG_H2C_EF
			for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++) {
				rtl_write_byte(rtlpriv, msgbox_ex_addr+cmd_idx, *((uint8_t *)(&h2c_cmd_ex)+cmd_idx));
			}
#else
			h2c_cmd_ex = le32_to_cpu(h2c_cmd_ex);
			rtl_write_dword(rtlpriv, msgbox_ex_addr, h2c_cmd_ex);
#endif
		}
		/* Write command */
		msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL8812_MESSAGE_BOX_SIZE);
#ifdef CONFIG_H2C_EF
		for (cmd_idx = 0; cmd_idx < RTL8812_MESSAGE_BOX_SIZE; cmd_idx++) {
			rtl_write_byte(rtlpriv, msgbox_addr+cmd_idx, *((uint8_t *)(&h2c_cmd)+cmd_idx));
		}
#else
		h2c_cmd = le32_to_cpu(h2c_cmd);
		rtl_write_dword(rtlpriv, msgbox_addr, h2c_cmd);
#endif

		bcmd_down = _TRUE;

	/*
	 * 	DBG_8192C("MSG_BOX:%d,CmdLen(%d), reg:0x%x =>h2c_cmd:0x%x, reg:0x%x =>h2c_cmd_ex:0x%x ..\n"
	 * 	 	,pHalData->LastHMEBoxNum ,CmdLen,msgbox_addr,h2c_cmd,msgbox_ex_addr,h2c_cmd_ex);
	 */

		rtlhal->last_hmeboxnum = (h2c_box_num+1) % RTL8812_MAX_H2C_BOX_NUMS;

	} while ((!bcmd_down) && (retry_cnts--));

exit:

	mutex_unlock(&(rtl_usbdev(rtlpriv)->h2c_fwcmd_mutex));
}
Example #6
0
static ssize_t
write_aw(
	struct file *file, const char __user *buffer,
	size_t count, loff_t *ppos)
{
	DEFINE_WAIT(wait);
	struct aw_usb_data *aw = &aw_instance;

	unsigned long copy_size;
	unsigned long bytes_written = 0;
	unsigned int partial;

	int result = 0;
	int maxretry;
	int errn = 0;
	int intr;

	intr = mutex_lock_interruptible(&(aw->lock));
	if (intr)
		return -EINTR;
	/* Sanity check to make sure aw is connected, powered, etc */
	if (aw->present == 0 || aw->aw_dev == NULL) {
		mutex_unlock(&(aw->lock));
		return -ENODEV;
	}

	do {
		unsigned long thistime;
		char *obuf = aw->obuf;

		copy_size = (count >= OBUF_SIZE) ? OBUF_SIZE : count;
		thistime = copy_size;
		if (copy_from_user(aw->obuf, buffer, copy_size)) {
			errn = -EFAULT;
			goto error;
		}
		maxretry = 5;
		while (thistime) {
			if (!aw->aw_dev) {
				errn = -ENODEV;
				goto error;
			}
			if (signal_pending(current)) {
				mutex_unlock(&(aw->lock));
				return bytes_written ? bytes_written : -EINTR;
			}

			result = usb_bulk_msg(aw->aw_dev,
					 usb_sndbulkpipe(aw->aw_dev, 2),
					 obuf, thistime, &partial, 5000);

			pr_debug(
				"write stats: result:%d thistime:%lu partial:%u",
						result, thistime, partial);

			if (result == -ETIMEDOUT) {
				/* NAK - so hold for a while */
				if (!maxretry--) {
					errn = -ETIME;
					goto error;
				}
				prepare_to_wait(
					&aw->wait_q, &wait, TASK_INTERRUPTIBLE);
				schedule_timeout(NAK_TIMEOUT);
				finish_wait(&aw->wait_q, &wait);
				continue;
			} else if (!result && partial) {
				obuf += partial;
				thistime -= partial;
			} else {
				break;
			}
		}
		if (result) {
			dev_err(&aw->aw_dev->dev, "Write Whoops - %x", result);
			errn = -EIO;
			goto error;
		}
		bytes_written += copy_size;
		count -= copy_size;
		buffer += copy_size;
	} while (count > 0);

	mutex_unlock(&(aw->lock));

	return bytes_written ? bytes_written : -EIO;

error:
	mutex_unlock(&(aw->lock));
	return errn;
}
Example #7
static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
			    __u8 *buf, size_t count, unsigned char rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 report_type;
	struct uhid_event *ev;
	unsigned long flags;
	int ret;
	size_t uninitialized_var(len);
	struct uhid_feature_answer_req *req;

	if (!uhid->running)
		return -EIO;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		report_type = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		report_type = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		report_type = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret)
		return ret;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		ret = -ENOMEM;
		goto unlock;
	}

	spin_lock_irqsave(&uhid->qlock, flags);
	ev->type = UHID_FEATURE;
	ev->u.feature.id = atomic_inc_return(&uhid->report_id);
	ev->u.feature.rnum = rnum;
	ev->u.feature.rtype = report_type;

	atomic_set(&uhid->report_done, 0);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				atomic_read(&uhid->report_done), 5 * HZ);

	/*
	 * Make sure "uhid->running" is cleared on shutdown before
	 * "uhid->report_done" is set.
	 */
	smp_rmb();
	if (!ret || !uhid->running) {
		ret = -EIO;
	} else if (ret < 0) {
		ret = -ERESTARTSYS;
	} else {
		spin_lock_irqsave(&uhid->qlock, flags);
		req = &uhid->report_buf.u.feature_answer;

		if (req->err) {
			ret = -EIO;
		} else {
			ret = 0;
			len = min(count,
				min_t(size_t, req->size, UHID_DATA_MAX));
			memcpy(buf, req->data, len);
		}

		spin_unlock_irqrestore(&uhid->qlock, flags);
	}

	atomic_set(&uhid->report_done, 1);

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret ? ret : len;
}
Example #8
0
/* XXX: the blocking 'wait' mechanism hasn't been tested yet */
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
					void *data, int len, bool wait)
{
	struct virtproc_info *vrp = rpdev->vrp;
	struct device *dev = &rpdev->dev;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;
	int err;
	unsigned long offset;
	void *sim_addr;

	if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
		dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
		return -EINVAL;
	}

	/* the payload's size is currently limited */
	if (len > vrp->buf_size - sizeof(struct rpmsg_hdr)) {
		dev_err(dev, "message is too big (%d)\n", len);
		return -EMSGSIZE;
	}

	/*
	 * protect svq from simultaneous concurrent manipulations,
	 * and serialize the sending of messages
	 */
	if (mutex_lock_interruptible(&vrp->svq_lock))
		return -ERESTARTSYS;
	/* grab a buffer */
	msg = get_a_buf(vrp);
	if (!msg && !wait) {
		err = -ENOMEM;
		goto out;
	}

	/* no free buffer ? wait for one (but bail after 15 seconds) */
	if (!msg) {
		/* enable "tx-complete" interrupts before dozing off */
		virtqueue_enable_cb(vrp->svq);

		/*
		 * sleep until a free buffer is available or 15 secs elapse.
		 * the timeout period is not configurable because frankly
		 * i don't see why drivers need to deal with that.
		 * if later this happens to be required, it'd be easy to add.
		 */
		err = wait_event_interruptible_timeout(vrp->sendq,
					(msg = get_a_buf(vrp)),
					msecs_to_jiffies(15000));

		/* on success, suppress "tx-complete" interrupts again */
		virtqueue_disable_cb(vrp->svq);

		if (err < 0) {
			err = -ERESTARTSYS;
			goto out;
		}

		if (!msg) {
			dev_err(dev, "timeout waiting for buffer\n");
			err = -ETIMEDOUT;
			goto out;
		}
	}

	msg->len = len;
	msg->flags = 0;
	msg->src = src;
	msg->dst = dst;
	msg->unused = 0;
	memcpy(msg->data, data, len);

	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Unused %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->unused);
#if 0
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);
#endif

	offset = ((unsigned long) msg) - ((unsigned long) vrp->rbufs);
	sim_addr = vrp->sim_base + offset;
	sg_init_one(&sg, sim_addr, sizeof(*msg) + len);

	/* add message to the remote processor's virtqueue */
	err = virtqueue_add_buf_gfp(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "virtqueue_add_buf_gfp failed: %d\n", err);
		goto out;
	}
	/* descriptors must be written before kicking remote processor */
	wmb();

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(vrp->svq);

	err = 0;
out:
	mutex_unlock(&vrp->svq_lock);
	return err;
}
Example #9
0
static int adu_open(struct inode *inode, struct file *file)
{
	struct adu_device *dev = NULL;
	struct usb_interface *interface;
	int subminor;
	int retval;

	dbg(2,"%s : enter", __func__);

	subminor = iminor(inode);

	if ((retval = mutex_lock_interruptible(&adutux_mutex))) {
		dbg(2, "%s : mutex lock failed", __func__);
		goto exit_no_lock;
	}

	interface = usb_find_interface(&adu_driver, subminor);
	if (!interface) {
		printk(KERN_ERR "adutux: %s - error, can't find device for "
		       "minor %d\n", __func__, subminor);
		retval = -ENODEV;
		goto exit_no_device;
	}

	dev = usb_get_intfdata(interface);
	if (!dev || !dev->udev) {
		retval = -ENODEV;
		goto exit_no_device;
	}

	/* check that nobody else is using the device */
	if (dev->open_count) {
		retval = -EBUSY;
		goto exit_no_device;
	}

	++dev->open_count;
	dbg(2,"%s : open count %d", __func__, dev->open_count);

	/* save device in the file's private structure */
	file->private_data = dev;

	/* initialize in direction */
	dev->read_buffer_length = 0;

	/* fixup first read by having urb waiting for it */
	usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
			 usb_rcvintpipe(dev->udev,
					dev->interrupt_in_endpoint->bEndpointAddress),
			 dev->interrupt_in_buffer,
			 le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
			 adu_interrupt_in_callback, dev,
			 dev->interrupt_in_endpoint->bInterval);
	dev->read_urb_finished = 0;
	if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL))
		dev->read_urb_finished = 1;
	/* we ignore failure */
	/* end of fixup for first read */

	/* initialize out direction */
	dev->out_urb_finished = 1;

	retval = 0;

exit_no_device:
	mutex_unlock(&adutux_mutex);
exit_no_lock:
	dbg(2,"%s : leave, return value %d ", __func__, retval);
	return retval;
}
Example #10
0
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
			 unsigned char __user *buf, size_t nr)
{
	unsigned char __user *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	int minimum, time;
	ssize_t retval = 0;
	ssize_t size;
	long timeout;
	unsigned long flags;
	int packet;

do_it_again:

	if (WARN_ON(!tty->read_buf))
		return -EAGAIN;

	c = job_control(tty, file);
	if (c < 0)
		return c;

	minimum = time = 0;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (!tty->icanon) {
		time = (HZ / 10) * TIME_CHAR(tty);
		minimum = MIN_CHAR(tty);
		if (minimum) {
			if (time)
				tty->minimum_to_wake = 1;
			else if (!waitqueue_active(&tty->read_wait) ||
				 (tty->minimum_to_wake > minimum))
				tty->minimum_to_wake = minimum;
		} else {
			timeout = 0;
			if (time) {
				timeout = time;
				time = 0;
			}
			tty->minimum_to_wake = minimum = 1;
		}
	}

	if (file->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&tty->atomic_read_lock))
			return -EAGAIN;
	} else {
		if (mutex_lock_interruptible(&tty->atomic_read_lock))
			return -ERESTARTSYS;
	}
	packet = tty->packet;

	add_wait_queue(&tty->read_wait, &wait);
	while (nr) {
		/* handle packet-mode control status bytes first */
		if (packet && tty->link->ctrl_status) {
			unsigned char cs;
			if (b != buf)
				break;
			spin_lock_irqsave(&tty->link->ctrl_lock, flags);
			cs = tty->link->ctrl_status;
			tty->link->ctrl_status = 0;
			spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
			if (tty_put_user(tty, cs, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
		    ((minimum - (b - buf)) >= 1))
			tty->minimum_to_wake = (minimum - (b - buf));

		if (!input_available_p(tty, 0)) {
			if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
				retval = -EIO;
				break;
			}
			if (tty_hung_up_p(file))
				break;
			if (!timeout)
				break;
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			n_tty_set_room(tty);
			timeout = schedule_timeout(timeout);
			BUG_ON(!tty->read_buf);
			continue;
		}
		__set_current_state(TASK_RUNNING);

		/* in packet mode, prefix the data with a TIOCPKT_DATA byte */
		if (packet && b == buf) {
			if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
		}

		if (tty->icanon && !L_EXTPROC(tty)) {
			/* canonical mode: copy input up to and including the end of line */
			while (nr && tty->read_cnt) {
				int eol;

				eol = test_and_clear_bit(tty->read_tail,
						tty->read_flags);
				c = tty->read_buf[tty->read_tail];
				spin_lock_irqsave(&tty->read_lock, flags);
				tty->read_tail = ((tty->read_tail+1) &
						  (N_TTY_BUF_SIZE-1));
				tty->read_cnt--;
				if (eol) {
					if (--tty->canon_data < 0)
						tty->canon_data = 0;
				}
				spin_unlock_irqrestore(&tty->read_lock, flags);

				if (!eol || (c != __DISABLED_CHAR)) {
					if (tty_put_user(tty, c, b++)) {
						retval = -EFAULT;
						b--;
						break;
					}
					nr--;
				}
				if (eol) {
					tty_audit_push(tty);
					break;
				}
			}
			if (retval)
				break;
		} else {
			int uncopied;
			uncopied = copy_from_read_buf(tty, &b, &nr);
			uncopied += copy_from_read_buf(tty, &b, &nr);
			if (uncopied) {
				retval = -EFAULT;
				break;
			}
		}

		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
			n_tty_set_room(tty);
			check_unthrottle(tty);
		}

		if (b - buf >= minimum)
			break;
		if (time)
			timeout = time;
	}
	mutex_unlock(&tty->atomic_read_lock);
	remove_wait_queue(&tty->read_wait, &wait);

	if (!waitqueue_active(&tty->read_wait))
		tty->minimum_to_wake = minimum;

	__set_current_state(TASK_RUNNING);
	size = b - buf;
	if (size) {
		retval = size;
		if (nr)
			clear_bit(TTY_PUSH, &tty->flags);
	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
		goto do_it_again;

	n_tty_set_room(tty);
	return retval;
}
Example #11
0
static long vpu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct miscdevice *dev = filp->private_data;
	struct jz_vpu *vpu = container_of(dev, struct jz_vpu, mdev);
	struct flush_cache_info info;
	int ret = 0;
	unsigned int status = 0;

	switch (cmd) {
	case WAIT_COMPLETE:
		ret = wait_for_completion_interruptible_timeout(
			&vpu->done, msecs_to_jiffies(200));
		if (ret > 0) {
			status = vpu->status;
		} else {
			dev_warn(vpu->dev, "[%d:%d] wait_for_completion timeout\n",
				 current->tgid, current->pid);
			if (vpu_reset(vpu) < 0)
				status = 0;
			vpu->done.done = 0;
		}
		if (copy_to_user((void *)arg, &status, sizeof(status)))
			ret = -EFAULT;
		break;

	case LOCK:
		if (vpu->owner_pid == current->pid) {
			dev_err(vpu->dev, "[%d:%d] dead lock\n",
				current->tgid, current->pid);
			ret = -EINVAL;
			break;
		}

		if (mutex_lock_interruptible(&vpu->mutex) != 0) {
			dev_err(vpu->dev, "[%d:%d] lock error!\n",
				current->tgid, current->pid);
			ret = -EIO;
			break;
		}
		vpu->owner_pid = current->pid;
		dev_dbg(vpu->dev, "[%d:%d] lock\n", current->tgid, current->pid);

		break;

	case UNLOCK:
		mutex_unlock(&vpu->mutex);
		vpu->owner_pid = 0;
		dev_dbg(vpu->dev, "[%d:%d] unlock\n", current->tgid, current->pid);
		break;

	case FLUSH_CACHE:
		if (copy_from_user(&info, (void *)arg, sizeof(info))) {
			ret = -EFAULT;
			break;
		}

		dma_cache_sync(NULL, (void *)info.addr, info.len, info.dir);
		dev_dbg(vpu->dev, "[%d:%d] flush cache\n", current->tgid, current->pid);
		break;
	default:
		break;
	}

	return ret;
}
Example #12
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;
	unsigned long nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan > 0) {
		if (mutex_lock_interruptible(&scan_mutex) < 0)
			return 0;
	}

	other_free = global_page_state(NR_FREE_PAGES);
	other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);

	tune_lmk_param(&other_free, &other_file, sc);

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     nr_to_scan, sc->gfp_mask, rem);

		if (nr_to_scan > 0)
			mutex_unlock(&scan_mutex);

		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		/* if task no longer has any memory ignore it */
		if (test_task_flag(tsk, TIF_MM_RELEASED))
			continue;

		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			if (test_task_flag(tsk, TIF_MEMDIE)) {
				rcu_read_unlock();
				/* give the system time to free up the memory */
				msleep_interruptible(20);
				mutex_unlock(&scan_mutex);
				return 0;
			}
		}

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		rcu_read_unlock();
		/* give the system time to free up the memory */
		msleep_interruptible(20);
	} else
		rcu_read_unlock();

	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     nr_to_scan, sc->gfp_mask, rem);
	mutex_unlock(&scan_mutex);
	return rem;
}
Example #13
0
static int if_ioctl(struct tty_struct *tty, struct file *file,
		    unsigned int cmd, unsigned long arg)
{
	struct cardstate *cs;
	int retval = -ENODEV;
	int int_arg;
	unsigned char buf[6];
	unsigned version[4];

	cs = (struct cardstate *) tty->driver_data;
	if (!cs) {
		pr_err("%s: no cardstate\n", __func__);
		return -ENODEV;
	}

	gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);

	if (mutex_lock_interruptible(&cs->mutex))
		return -ERESTARTSYS; // FIXME -EINTR?

	if (!cs->connected) {
		gig_dbg(DEBUG_IF, "not connected");
		retval = -ENODEV;
	} else if (!cs->open_count)
		dev_warn(cs->dev, "%s: device not opened\n", __func__);
	else {
		retval = 0;
		switch (cmd) {
		case GIGASET_REDIR:
			retval = get_user(int_arg, (int __user *) arg);
			if (retval >= 0)
				retval = if_lock(cs, &int_arg);
			if (retval >= 0)
				retval = put_user(int_arg, (int __user *) arg);
			break;
		case GIGASET_CONFIG:
			retval = get_user(int_arg, (int __user *) arg);
			if (retval >= 0)
				retval = if_config(cs, &int_arg);
			if (retval >= 0)
				retval = put_user(int_arg, (int __user *) arg);
			break;
		case GIGASET_BRKCHARS:
			retval = copy_from_user(&buf,
					(const unsigned char __user *) arg, 6)
				? -EFAULT : 0;
			if (retval >= 0) {
				gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS",
						6, (const unsigned char *) arg);
				retval = cs->ops->brkchars(cs, buf);
			}
			break;
		case GIGASET_VERSION:
			retval = copy_from_user(version,
					(unsigned __user *) arg, sizeof version)
				? -EFAULT : 0;
			if (retval >= 0)
				retval = if_version(cs, version);
			if (retval >= 0)
				retval = copy_to_user((unsigned __user *) arg,
						      version, sizeof version)
					? -EFAULT : 0;
			break;
		default:
			gig_dbg(DEBUG_ANY, "%s: arg not supported - 0x%04x",
				__func__, cmd);
			retval = -ENOIOCTLCMD;
		}
	}

	mutex_unlock(&cs->mutex);

	return retval;
}
Example #14
0
static ssize_t reg_write(struct file *file, const char __user *ubuf,
						size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct msm_fb_data_type *mfd = s->private;
	struct mdss_panel_data *pdata;
	struct mdss_mdp_ctl *ctl;
	char *buf;
	const char *p;
	enum dbg_cmd_type cmd;
	u8 data[MAX_WRITE_DATA];
	int i = 0;
	int j;
	int ret;
	struct dsi_cmd_desc dsi;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (!mfd->panel_power_on) {
		pr_err("%s: panel is NOT on\n", __func__);
		goto exit;
	}

	ctl = mfd_to_ctl(mfd);
	if (!ctl)
		goto exit;

	if (mutex_lock_interruptible(&ctl->lock))
		goto exit;

	pdata = dev_get_platdata(&mfd->pdev->dev);
	if (!pdata) {
		pr_err("no panel connected\n");
		goto unlock_exit;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);
	if (!ctrl_pdata) {
		pr_err("%s: Invalid input data\n", __func__);
		goto unlock_exit;
	}

	pr_err("%s\n", __func__);
	ret = setup_reg_access(&buf, ubuf, count);
	if (ret)
		goto unlock_exit;

	ret = get_cmd_type(buf, &cmd);
	if (ret) {
		update_res_buf("Write - unknown type\n");
		goto fail_free_all;
	}

	p = buf;
	p = p+4;

	/* Get first param, Register */
	if (sscanf(p, "%4hhx", &data[0]) != 1) {
		update_res_buf("Write - parameter error\n");
		ret = -EINVAL;
		goto fail_free_all;
	}
	i++;

	while (isxdigit(*p) || (*p == 'x'))
		p++;

	ret = get_parameters(p, data, ARRAY_SIZE(data) - 1, &i);
	if (ret)
		goto fail_free_all;

	ret = prepare_for_reg_access(mfd);
	if (ret)
		goto fail_free_all;

	if (cmd == DCS) {
		if (i == 1) { /* 0 parameters */
			dsi.dchdr.dtype = DTYPE_DCS_WRITE;
		} else if (i == 2) { /* 1 parameter */
			dsi.dchdr.dtype = DTYPE_DCS_WRITE1;
		} else { /* Many parameters */
			dsi.dchdr.dtype = DTYPE_DCS_LWRITE;
		}
	} else {
		if (i == 1) { /* 0 parameters */
			dsi.dchdr.dtype = DTYPE_GEN_WRITE;
		} else if (i == 2) { /* 1 parameter */
			dsi.dchdr.dtype = DTYPE_GEN_WRITE1;
		} else if (i == 3) { /* 2 parameters */
			dsi.dchdr.dtype = DTYPE_GEN_WRITE2;
		} else { /* Many parameters */
			dsi.dchdr.dtype = DTYPE_GEN_LWRITE;
		}
	}
	dsi.dchdr.last = 1;
	dsi.dchdr.vc = 0;
	dsi.dchdr.ack = 0;
	dsi.dchdr.wait = 0;
	dsi.dchdr.dlen = i;
	dsi.payload = data;

	pr_err("last = %d, vc = %d, ack = %d, wait = %d, dlen = %d\n",
		dsi.dchdr.last, dsi.dchdr.vc, dsi.dchdr.ack, dsi.dchdr.wait,
		dsi.dchdr.dlen);
	for (j = 0; j < i; j++)
		pr_err("payload[%d] = 0x%x\n", j, dsi.payload[j]);
	mdss_dsi_cmds_tx(ctrl_pdata, &dsi, 1);

	ret = post_reg_access(mfd);
	if (ret)
		goto fail_free_all;

	print_params(dsi.dchdr.dtype, data[0], i, dsi.payload);

fail_free_all:
	kfree(buf);
unlock_exit:
	mutex_unlock(&ctl->lock);
exit:
	return count;
}
Example #15
0
/*
=====================rsz_ioctl===========================
This function will process IOCTL commands sent by
the application and control the device IO operations.
*/
static int rsz_doioctl(struct inode *inode, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
	int ret = 0, prio;
	unsigned mode, user_mode;
	/* get the configuration of this channel from the
	   private_data member of file */
	struct imp_logical_channel *rsz_conf_chan =
	    (struct imp_logical_channel *)file->private_data;

	if (ISNULL(rsz_conf_chan)) {
		dev_err(rsz_device, "channel ptr is null\n");
		return -EFAULT;
	}

	if (ISNULL((void *)arg)) {
		dev_err(rsz_device, "arg ptr is null\n");
		return -EFAULT;
	}

	mode = imp_hw_if->get_resize_oper_mode();
	switch (cmd) {
	case RSZ_QUERYBUF:
	case RSZ_REQBUF:
	case RSZ_RESIZE:
	case RSZ_RECONFIG:
		{
			if (mode == IMP_MODE_CONTINUOUS)
				return -EACCES;
		}
		break;
	}

	switch (cmd) {
	case RSZ_S_OPER_MODE:
		{
			dev_dbg(rsz_device, "RSZ_S_OPER_MODE:\n");
			user_mode = *((unsigned long *)arg);
			if (rsz_conf_chan->mode != IMP_MODE_INVALID) {
				dev_err(rsz_device,
					"Mode set for this channel already\n");
				ret = -EINVAL;
				goto ERROR;
			}

			if (user_mode >= IMP_MODE_INVALID) {
				dev_err(rsz_device, "Invalid mode\n");
				ret = -EINVAL;
				goto ERROR;
			}

			imp_hw_if->set_resize_oper_mode(user_mode);
			mode = imp_hw_if->get_resize_oper_mode();

			if (user_mode != mode) {
				dev_err(rsz_device,
					"Operation mode doesn't match"
					" with current hw mode\n");
				ret = -EINVAL;
				goto ERROR;
			}
			ret = mutex_lock_interruptible(&(rsz_conf_chan->lock));
			if (!ret) {
				rsz_conf_chan->mode = mode;
				mutex_unlock(&(rsz_conf_chan->lock));
			}
			dev_dbg(rsz_device,
				"RSZ_S_OPER_MODE: Operation mode set to %d",
				user_mode);
		}
		break;
		/* if case is to query the current operation mode */
	case RSZ_G_OPER_MODE:
		{
			dev_dbg(rsz_device, "RSZ_G_OPER_MODE:\n");
			*((unsigned long *)arg) = rsz_conf_chan->mode;
			dev_dbg(rsz_device,
				"RSZ_G_OPER_MODE: mode = %d",
				rsz_conf_chan->mode);
		}
		break;

	case RSZ_S_CONFIG:
		{
			dev_dbg(rsz_device, "RSZ_S_CONFIG:\n");
			ret = mutex_lock_interruptible(&(rsz_conf_chan->lock));
			if (!ret) {
				ret = imp_set_resizer_config(rsz_device,
						     rsz_conf_chan,
						     (struct rsz_channel_config
						      *)arg);
				mutex_unlock(&(rsz_conf_chan->lock));
			}
		}
		break;

	case RSZ_G_CONFIG:
		{
			struct rsz_channel_config *user_config =
			    (struct rsz_channel_config *)arg;

			dev_err(rsz_device, "RSZ_G_CONFIG:%d:%d:%d\n",
				user_config->oper_mode, user_config->chain,
				user_config->len);
			if (ISNULL(user_config->config)) {
				ret = -EINVAL;
				dev_err(rsz_device,
					"error in PREV_GET_CONFIG\n");
				goto ERROR;
			}
			ret =
			    imp_get_resize_config(rsz_device, rsz_conf_chan,
						  user_config);
		}
		break;

	case RSZ_QUERYBUF:
		{
			dev_dbg(rsz_device, "RSZ_QUERYBUF:\n");
			ret = mutex_lock_interruptible(&(rsz_conf_chan->lock));
			if (!ret) {
				ret = imp_common_query_buffer(rsz_device,
						      rsz_conf_chan,
						      (struct imp_buffer *)arg);
				mutex_unlock(&(rsz_conf_chan->lock));
			}
		}
		break;

		/* if case is to request buffers */
	case RSZ_REQBUF:
		{
			dev_dbg(rsz_device, "RSZ_REQBUF:\n");
			ret = mutex_lock_interruptible(&(rsz_conf_chan->lock));
			if (!ret) {
				ret = imp_common_request_buffer(rsz_device,
							rsz_conf_chan,
							(struct imp_reqbufs *)
							arg);
				mutex_unlock(&(rsz_conf_chan->lock));
			}
		}
		break;
		/* if case is to set the priority of the channel */
	case RSZ_S_PRIORITY:
		{
			prio = *((unsigned long *)arg);

			dev_dbg(rsz_device, "RSZ_S_PRIORITY: priority = %d\n",
				prio);
			/* Check the priority range and assign the priority */
			if (prio > MAX_PRIORITY || prio < MIN_PRIORITY) {
				ret = -EINVAL;
				goto ERROR;
			} else {
				ret = mutex_lock_interruptible(
					&(rsz_conf_chan->lock));
				if (!ret) {
					rsz_conf_chan->priority = prio;
					mutex_unlock(&(rsz_conf_chan->lock));
				}
			}
			dev_dbg(rsz_device, "\n resizer_Priority:end");
		}
		break;
		/* This ioctl is used to get the priority of
		   the current logical channel */
	case RSZ_G_PRIORITY:
		{
			dev_dbg(rsz_device, "RSZ_G_PRIORITY: \n");
			/* Get the priority from the channel */
			*((unsigned long *)arg) = rsz_conf_chan->priority;
		}
		break;

	case RSZ_RESIZE:
		{
			dev_dbg(rsz_device, "RSZ_RESIZE: \n");
			ret = mutex_lock_interruptible(&(rsz_conf_chan->lock));
			if (!ret) {
				ret = imp_common_start_resize(rsz_device,
						      rsz_conf_chan,
						      (struct imp_convert *)
						      arg);
				mutex_unlock(&(rsz_conf_chan->lock));
			}
		}
		break;

	case RSZ_RECONFIG:
		{
			dev_dbg(rsz_device, "RSZ_RECONFIG: \n");
			ret = mutex_lock_interruptible(&(rsz_conf_chan->lock));
			if (!ret) {
				ret = imp_common_reconfig_resizer(rsz_device,
						(struct rsz_reconfig *)arg,
						 rsz_conf_chan);
				mutex_unlock(&(rsz_conf_chan->lock));
			}
		}
		break;

#ifdef CONFIG_IMP_DEBUG
	case RSZ_DUMP_HW_CONFIG:
		{
			dev_dbg(rsz_device, "RSZ_DUMP_HW_CONFIG: \n");
			ret = mutex_lock_interruptible(&(rsz_conf_chan->lock));
			if (!ret) {
				if (imp_hw_if->dump_hw_config)
					imp_hw_if->dump_hw_config();
				mutex_unlock(&(rsz_conf_chan->lock));
			}
		}
		break;
#endif
	default:
		dev_dbg(rsz_device, "resizer_ioctl: Invalid Command Value");
		ret = -EINVAL;
	}

ERROR:
	return ret;
}				/*End of function IOCTL */
Example #16
0
static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
			loff_t *ppos)
{
	struct adu_device *dev;
	size_t bytes_read = 0;
	size_t bytes_to_read = count;
	int i;
	int retval = 0;
	int timeout = 0;
	int should_submit = 0;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	dbg(2," %s : enter, count = %Zd, file=%p", __func__, count, file);

	dev = file->private_data;
	dbg(2," %s : dev=%p", __func__, dev);

	if (mutex_lock_interruptible(&dev->mtx))
		return -ERESTARTSYS;

	/* verify that the device wasn't unplugged */
	if (dev->udev == NULL) {
		retval = -ENODEV;
		printk(KERN_ERR "adutux: No device or device unplugged %d\n",
		       retval);
		goto exit;
	}

	/* verify that some data was requested */
	if (count == 0) {
		dbg(1," %s : read request of 0 bytes", __func__);
		goto exit;
	}

	timeout = COMMAND_TIMEOUT;
	dbg(2," %s : about to start looping", __func__);
	while (bytes_to_read) {
		int data_in_secondary = dev->secondary_tail - dev->secondary_head;
		dbg(2," %s : while, data_in_secondary=%d, status=%d",
		    __func__, data_in_secondary,
		    dev->interrupt_in_urb->status);

		if (data_in_secondary) {
			/* drain secondary buffer */
			int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary;
			i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount);
			if (i) {
				retval = -EFAULT;
				goto exit;
			}
			dev->secondary_head += (amount - i);
			bytes_read += (amount - i);
			bytes_to_read -= (amount - i);
			if (i) {
				retval = bytes_read ? bytes_read : -EFAULT;
				goto exit;
			}
		} else {
			/* we check the primary buffer */
			spin_lock_irqsave (&dev->buflock, flags);
			if (dev->read_buffer_length) {
				/* we secure access to the primary */
				char *tmp;
				dbg(2," %s : swap, read_buffer_length = %d",
				    __func__, dev->read_buffer_length);
				tmp = dev->read_buffer_secondary;
				dev->read_buffer_secondary = dev->read_buffer_primary;
				dev->read_buffer_primary = tmp;
				dev->secondary_head = 0;
				dev->secondary_tail = dev->read_buffer_length;
				dev->read_buffer_length = 0;
				spin_unlock_irqrestore(&dev->buflock, flags);
				/* we have a free buffer so use it */
				should_submit = 1;
			} else {
				/* even the primary was empty - we may need to do IO */
				if (!dev->read_urb_finished) {
					/* somebody is doing IO */
					spin_unlock_irqrestore(&dev->buflock, flags);
					dbg(2," %s : submitted already", __func__);
				} else {
					/* we must initiate input */
					dbg(2," %s : initiate input", __func__);
					dev->read_urb_finished = 0;
					spin_unlock_irqrestore(&dev->buflock, flags);

					usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
							 usb_rcvintpipe(dev->udev,
							 		dev->interrupt_in_endpoint->bEndpointAddress),
							 dev->interrupt_in_buffer,
							 le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
							 adu_interrupt_in_callback,
							 dev,
							 dev->interrupt_in_endpoint->bInterval);
					retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
					if (retval) {
						dev->read_urb_finished = 1;
						if (retval == -ENOMEM) {
							retval = bytes_read ? bytes_read : -ENOMEM;
						}
						dbg(2," %s : submit failed", __func__);
						goto exit;
					}
				}

				/* we wait for I/O to complete */
				set_current_state(TASK_INTERRUPTIBLE);
				add_wait_queue(&dev->read_wait, &wait);
				spin_lock_irqsave(&dev->buflock, flags);
				if (!dev->read_urb_finished) {
					spin_unlock_irqrestore(&dev->buflock, flags);
					timeout = schedule_timeout(COMMAND_TIMEOUT);
				} else {
					spin_unlock_irqrestore(&dev->buflock, flags);
					set_current_state(TASK_RUNNING);
				}
				remove_wait_queue(&dev->read_wait, &wait);

				if (timeout <= 0) {
					dbg(2," %s : timeout", __func__);
					retval = bytes_read ? bytes_read : -ETIMEDOUT;
					goto exit;
				}

				if (signal_pending(current)) {
					dbg(2," %s : signal pending", __func__);
					retval = bytes_read ? bytes_read : -EINTR;
					goto exit;
				}
			}
		}
	}

	retval = bytes_read;
	/* if the primary buffer is empty then use it */
	spin_lock_irqsave(&dev->buflock, flags);
	if (should_submit && dev->read_urb_finished) {
		dev->read_urb_finished = 0;
		spin_unlock_irqrestore(&dev->buflock, flags);
		usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
				 usb_rcvintpipe(dev->udev,
				 		dev->interrupt_in_endpoint->bEndpointAddress),
				dev->interrupt_in_buffer,
				le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
				adu_interrupt_in_callback,
				dev,
				dev->interrupt_in_endpoint->bInterval);
		if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL) != 0)
			dev->read_urb_finished = 1;
		/* we ignore failure */
	} else {
		spin_unlock_irqrestore(&dev->buflock, flags);
	}

exit:
	/* unlock the device */
	mutex_unlock(&dev->mtx);

	dbg(2," %s : leave, return value %d", __func__, retval);
	return retval;
}
Example #17
0
/*
=====================rsz_open===========================
This function creates a channel.
*/
static int rsz_open(struct inode *inode, struct file *filp)
{
	struct imp_logical_channel *rsz_conf_chan;
	int i, mode, ret;

	if (filp->f_flags & O_NONBLOCK) {
		dev_err
		    (rsz_device,
		     "rsz_open: device cannot be opened in non-blocking mode\n");
		return -EBUSY;
	}

	mode = imp_hw_if->get_resize_oper_mode();

	ret = mutex_lock_interruptible(&rsz_dev.lock);
	if (ret)
		return ret;
	if ((mode == IMP_MODE_CONTINUOUS) ||
	    ((mode == IMP_MODE_SINGLE_SHOT) && (!imp_hw_if->serialize()))) {
		if (rsz_dev.users != 0) {
			dev_err(rsz_device,
				"\n mode doesn't allow multiple instances\n");
			mutex_unlock(&rsz_dev.lock);
			return -EBUSY;
		}
	}

	/* if usage counter is greater than maximum supported channels
	   return error */
	if (rsz_dev.users >= MAX_CHANNELS) {
		dev_err(rsz_device,
			"\n modules usage count is greater than supported ");
		mutex_unlock(&rsz_dev.lock);
		return -EBUSY;
	}

	rsz_dev.users++;
	mutex_unlock(&rsz_dev.lock);
	/* allocate memory for a new configuration */
	rsz_conf_chan = kmalloc(sizeof(struct imp_logical_channel), GFP_KERNEL);

	if (rsz_conf_chan == NULL) {
		dev_err(rsz_device,
			"\n cannot allocate memory for channel config");
		return -ENOMEM;
	}

	rsz_conf_chan->config_state = STATE_NOT_CONFIGURED;
	rsz_conf_chan->mode = IMP_MODE_INVALID;
	rsz_conf_chan->primary_user = 0;
	rsz_conf_chan->chained = 0;
	rsz_conf_chan->config = NULL;
	rsz_conf_chan->user_config = NULL;
	rsz_conf_chan->user_config_size = 0;

	/* Set priority to lowest for that configuration channel */
	rsz_conf_chan->priority = MIN_PRIORITY;

	/* Set the channel type to resize */
	rsz_conf_chan->type = IMP_RESIZER;

	for (i = 0; i < MAX_BUFFERS; i++) {
		rsz_conf_chan->in_bufs[i] = NULL;
		rsz_conf_chan->out_buf1s[i] = NULL;
		rsz_conf_chan->out_buf2s[i] = NULL;
	}
	rsz_conf_chan->in_numbufs = 0;
	rsz_conf_chan->out_numbuf1s = 0;
	rsz_conf_chan->out_numbuf2s = 0;

	dev_dbg(rsz_device, "Initializing of channel done\n");

	/* Initialize the channel completion and mutex */
	init_completion(&(rsz_conf_chan->channel_sem));
	rsz_conf_chan->channel_sem.done = 0;
	mutex_init(&(rsz_conf_chan->lock));
	/* store the configuration structure in private data */
	filp->private_data = rsz_conf_chan;

	return 0;
}
Example #18
0
static ssize_t adu_write(struct file *file, const __user char *buffer,
			 size_t count, loff_t *ppos)
{
	DECLARE_WAITQUEUE(waita, current);
	struct adu_device *dev;
	size_t bytes_written = 0;
	size_t bytes_to_write;
	size_t buffer_size;
	unsigned long flags;
	int retval;

	dbg(2," %s : enter, count = %Zd", __func__, count);

	dev = file->private_data;

	retval = mutex_lock_interruptible(&dev->mtx);
	if (retval)
		goto exit_nolock;

	/* verify that the device wasn't unplugged */
	if (dev->udev == NULL) {
		retval = -ENODEV;
		printk(KERN_ERR "adutux: No device or device unplugged %d\n",
		       retval);
		goto exit;
	}

	/* verify that we actually have some data to write */
	if (count == 0) {
		dbg(1," %s : write request of 0 bytes", __func__);
		goto exit;
	}

	while (count > 0) {
		add_wait_queue(&dev->write_wait, &waita);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&dev->buflock, flags);
		if (!dev->out_urb_finished) {
			spin_unlock_irqrestore(&dev->buflock, flags);

			mutex_unlock(&dev->mtx);
			if (signal_pending(current)) {
				dbg(1," %s : interrupted", __func__);
				set_current_state(TASK_RUNNING);
				retval = -EINTR;
				goto exit_onqueue;
			}
			if (schedule_timeout(COMMAND_TIMEOUT) == 0) {
				dbg(1, "%s - command timed out.", __func__);
				retval = -ETIMEDOUT;
				goto exit_onqueue;
			}
			remove_wait_queue(&dev->write_wait, &waita);
			retval = mutex_lock_interruptible(&dev->mtx);
			if (retval) {
				retval = bytes_written ? bytes_written : retval;
				goto exit_nolock;
			}

			dbg(4," %s : in progress, count = %Zd", __func__, count);
		} else {
			spin_unlock_irqrestore(&dev->buflock, flags);
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&dev->write_wait, &waita);
			dbg(4," %s : sending, count = %Zd", __func__, count);

			/* write the data into interrupt_out_buffer from userspace */
			buffer_size = le16_to_cpu(dev->interrupt_out_endpoint->wMaxPacketSize);
			bytes_to_write = count > buffer_size ? buffer_size : count;
			dbg(4," %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd",
			    __func__, buffer_size, count, bytes_to_write);

			if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) {
				retval = -EFAULT;
				goto exit;
			}

			/* send off the urb */
			usb_fill_int_urb(
				dev->interrupt_out_urb,
				dev->udev,
				usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress),
				dev->interrupt_out_buffer,
				bytes_to_write,
				adu_interrupt_out_callback,
				dev,
				dev->interrupt_out_endpoint->bInterval);
			dev->interrupt_out_urb->actual_length = bytes_to_write;
			dev->out_urb_finished = 0;
			retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
			if (retval < 0) {
				dev->out_urb_finished = 1;
				dev_err(&dev->udev->dev, "Couldn't submit "
					"interrupt_out_urb %d\n", retval);
				goto exit;
			}

			buffer += bytes_to_write;
			count -= bytes_to_write;

			bytes_written += bytes_to_write;
		}
	}
	mutex_unlock(&dev->mtx);
	return bytes_written;

exit:
	mutex_unlock(&dev->mtx);
exit_nolock:
	dbg(2," %s : leave, return value %d", __func__, retval);
	return retval;

exit_onqueue:
	remove_wait_queue(&dev->write_wait, &waita);
	return retval;
}
Example #19
0
static ssize_t
read_aw(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	DEFINE_WAIT(wait);
	struct aw_usb_data *aw = &aw_instance;
	ssize_t read_count;
	unsigned int partial;
	int this_read;
	int result;
	int maxretry = 10;
	char *ibuf;
	int intr;

	intr = mutex_lock_interruptible(&(aw->lock));
	if (intr)
		return -EINTR;
	/* Sanity check to make sure aw is connected, powered, etc */
	if (aw->present == 0 || aw->aw_dev == NULL) {
		mutex_unlock(&(aw->lock));
		return -ENODEV;
	}

	ibuf = aw->ibuf;

	read_count = 0;


	while (count > 0) {
		if (signal_pending(current)) {
			mutex_unlock(&(aw->lock));
			return read_count ? read_count : -EINTR;
		}
		if (!aw->aw_dev) {
			mutex_unlock(&(aw->lock));
			return -ENODEV;
		}
		this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;

		result = usb_bulk_msg(aw->aw_dev,
				      usb_rcvbulkpipe(aw->aw_dev, 1),
				      ibuf, this_read, &partial,
				      8000);

		pr_debug(
			"read stats: result:%d this_read:%u partial:%u",
						result, this_read, partial);

		if (partial) {
			count = partial;
			this_read = partial;
		} else if (result == -ETIMEDOUT || result == 15) {
							/* FIXME: 15 ??? */
			if (!maxretry--) {
				mutex_unlock(&(aw->lock));
				dev_err(&aw->aw_dev->dev, "read_aw: maxretry timeout");
				return -ETIME;
			}
			prepare_to_wait(&aw->wait_q, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(NAK_TIMEOUT);
			finish_wait(&aw->wait_q, &wait);
			continue;
		} else if (result != -EREMOTEIO) {
			mutex_unlock(&(aw->lock));
			dev_err(&aw->aw_dev->dev,
				"Read Whoops - result:%u partial:%u this_read:%u",
				result, partial, this_read);
			return -EIO;
		} else {
			mutex_unlock(&(aw->lock));
			return 0;
		}

		if (this_read) {
			if (copy_to_user(buffer, ibuf, this_read)) {
				mutex_unlock(&(aw->lock));
				return -EFAULT;
			}
			count -= this_read;
			read_count += this_read;
			buffer += this_read;
		}
	}
	mutex_unlock(&(aw->lock));
	return read_count;
}
Example #20
static int mpq_tspp_dmx_remove_channel(struct dvb_demux_feed *feed)
{
	int tsif;
	int ret;
	int channel_id;
	int slot;
	atomic_t *data_cnt;
	int *channel_ref_count;
	enum tspp_source tspp_source;
	struct tspp_filter tspp_filter;
	struct mpq_demux *mpq_demux = feed->demux->priv;
	int restore_null_blocking_filters = 0;
	int remove_accept_all_filter = 0;
	int remove_user_filters = 0;
	int accept_all_filter_existed = 0;

	MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);

	/* determine the TSIF instance from the demux source */
	if (mpq_demux->source == DMX_SOURCE_FRONT0) {
		tsif = 0;
		tspp_source = TSPP_SOURCE_TSIF0;
	} else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
		tsif = 1;
		tspp_source = TSPP_SOURCE_TSIF1;
	} else {
		
		MPQ_DVB_ERR_PRINT(
			"%s: invalid input source (%d)\n",
			__func__,
			mpq_demux->source);

		return -EINVAL;
	}

	if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex))
		return -ERESTARTSYS;

	channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
	channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
	data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;

	
	if (*channel_ref_count == 0) {
		
		MPQ_DVB_ERR_PRINT(
			"%s: invalid feed (%d)\n",
			__func__,
			channel_id);

		ret = -EINVAL;
		goto remove_channel_failed;
	}

	slot = mpq_tspp_get_filter_slot(tsif, feed->pid);

	if (slot < 0) {
		
		MPQ_DVB_ERR_PRINT(
			"%s: mpq_tspp_get_filter_slot failed (%d,%d)\n",
			__func__,
			feed->pid,
			tsif);

		ret = -EINVAL;
		goto remove_channel_failed;
	}

	/* drop this feed's reference on the filter slot */
	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;

	if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count) {
		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
		return 0;
	}

	if (feed->pid == TSPP_PASS_THROUGH_PID)
		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
	else if (feed->pid == TSPP_NULL_PACKETS_PID)
		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;

	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;

	if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <=
					TSPP_MAX_HW_PID_FILTER_NUM) {
		/* HW filtering mode: remove the filter from the TSPP HW */
		tspp_filter.priority =
			mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index;
		ret = tspp_remove_filter(0, channel_id, &tspp_filter);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: tspp_remove_filter failed (%d,%d)\n",
				__func__,
				channel_id,
				tspp_filter.priority);

			goto remove_channel_failed_restore_count;
		}
		mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;

		MPQ_DVB_DBG_PRINT(
			"%s: HW filtering mode: Removed TSPP HW filter, PID = %d, index = %d\n",
			__func__, feed->pid, tspp_filter.priority);
	} else  if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
					(TSPP_MAX_HW_PID_FILTER_NUM + 1)) {
		/* removing this filter drops the count back to the HW limit */

		accept_all_filter_existed =
			mpq_dmx_tspp_info.tsif[tsif].
				accept_all_filter_exists_flag;

		/* temporarily accept all packets while the filters are reconfigured */
		ret = mpq_tspp_add_accept_all_filter(channel_id,
					tspp_source);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
				__func__, channel_id, tspp_source);

			goto remove_channel_failed_restore_count;
		}

		ret = mpq_tspp_remove_null_blocking_filters(channel_id,
					tspp_source);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: mpq_tspp_remove_null_blocking_filters(%d, %d) failed\n",
				__func__, channel_id, tspp_source);

			restore_null_blocking_filters = 1;
			if (!accept_all_filter_existed)
				remove_accept_all_filter = 1;

			goto remove_channel_failed_restore_count;
		}

		ret = mpq_tspp_add_all_user_filters(channel_id,
					tspp_source);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: mpq_tspp_add_all_user_filters(%d, %d) failed\n",
				__func__, channel_id, tspp_source);

			remove_user_filters = 1;
			restore_null_blocking_filters = 1;
			if (!accept_all_filter_existed)
				remove_accept_all_filter = 1;

			goto remove_channel_failed_restore_count;
		}

		ret = mpq_tspp_remove_accept_all_filter(channel_id,
					tspp_source);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
				__func__, channel_id, tspp_source);

			remove_user_filters = 1;
			restore_null_blocking_filters = 1;
			if (!accept_all_filter_existed)
				remove_accept_all_filter = 1;

			goto remove_channel_failed_restore_count;
		}
	} else {
		/* keep the accept-all filter only if pass-all or pass-nulls is set */
		if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
			(mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {

			ret = mpq_tspp_remove_accept_all_filter(channel_id,
						tspp_source);
			if (ret < 0) {
				MPQ_DVB_ERR_PRINT(
					"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
					__func__, channel_id,
					tspp_source);

				goto remove_channel_failed_restore_count;
			}
		}
	}

	mpq_dmx_tspp_info.tsif[tsif].current_filter_count--;
	(*channel_ref_count)--;

	MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
		__func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);

	if (*channel_ref_count == 0) {
		/* last feed on this TSIF - tear down the TSPP channel */
		tspp_unregister_notification(0, channel_id);
		tspp_close_stream(0, channel_id);
		tspp_close_channel(0, channel_id);
		atomic_set(data_cnt, 0);

		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC)
			mpq_dmx_channel_mem_free(tsif);
	}

	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
	return 0;

remove_channel_failed_restore_count:
	/* restore the filter table entry and its reference count */
	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;

	if (remove_user_filters)
		mpq_tspp_remove_all_user_filters(channel_id, tspp_source);

	if (restore_null_blocking_filters)
		mpq_tspp_add_null_blocking_filters(channel_id, tspp_source);

	if (remove_accept_all_filter)
		mpq_tspp_remove_accept_all_filter(channel_id, tspp_source);

	/* restore the pass-all / pass-nulls flags */
	if (feed->pid == TSPP_PASS_THROUGH_PID)
		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
	else if (feed->pid == TSPP_NULL_PACKETS_PID)
		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;

remove_channel_failed:
	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
	return ret;
}
Example #21
0
/* ioctl - I/O control */
static long mpu_dev_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mpu_private_data *mpu =
	    container_of(file->private_data, struct mpu_private_data, dev);
	struct i2c_client *client = mpu->client;
	struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
	int retval = 0;
	struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
	struct ext_slave_descr **slave = mldl_cfg->slave;
	struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
	int ii;

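	/* resolve the I2C adapter for each external slave that has platform data */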
	for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
		if (!pdata_slave[ii])
			slave_adapter[ii] = NULL;
		else
			slave_adapter[ii] =
				i2c_get_adapter(pdata_slave[ii]->adapt_num);
	}
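	/* the gyro is accessed through the MPU client's own adapter */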
	slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter;

	retval = mutex_lock_interruptible(&mpu->mutex);
	if (retval) {
		dev_err(&client->adapter->dev,
			"%s: mutex_lock_interruptible returned %d\n",
			__func__, retval);
		return retval;
	}

	switch (cmd) {
	case MPU_GET_EXT_SLAVE_PLATFORM_DATA:
		retval = mpu_dev_ioctl_get_ext_slave_platform_data(
			client,
			(struct ext_slave_platform_data __user *)arg);
		break;
	case MPU_GET_MPU_PLATFORM_DATA:
		retval = mpu_dev_ioctl_get_mpu_platform_data(
			client,
			(struct mpu_platform_data __user *)arg);
		break;
	case MPU_GET_EXT_SLAVE_DESCR:
		retval = mpu_dev_ioctl_get_ext_slave_descr(
			client,
			(struct ext_slave_descr __user *)arg);
		break;
	case MPU_READ:
	case MPU_WRITE:
	case MPU_READ_MEM:
	case MPU_WRITE_MEM:
	case MPU_READ_FIFO:
	case MPU_WRITE_FIFO:
		retval = mpu_handle_mlsl(
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			mldl_cfg->mpu_chip_info->addr, cmd,
			(struct mpu_read_write __user *)arg);
		break;
	case MPU_CONFIG_GYRO:
		retval = inv_mpu_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_CONFIG_ACCEL:
		retval = slave_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave[EXT_SLAVE_TYPE_ACCEL],
			pdata_slave[EXT_SLAVE_TYPE_ACCEL],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_CONFIG_COMPASS:
		retval = slave_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave[EXT_SLAVE_TYPE_COMPASS],
			pdata_slave[EXT_SLAVE_TYPE_COMPASS],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_CONFIG_PRESSURE:
		retval = slave_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			slave[EXT_SLAVE_TYPE_PRESSURE],
			pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_GET_CONFIG_GYRO:
		retval = inv_mpu_get_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_GET_CONFIG_ACCEL:
		retval = slave_get_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave[EXT_SLAVE_TYPE_ACCEL],
			pdata_slave[EXT_SLAVE_TYPE_ACCEL],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_GET_CONFIG_COMPASS:
		retval = slave_get_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave[EXT_SLAVE_TYPE_COMPASS],
			pdata_slave[EXT_SLAVE_TYPE_COMPASS],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_GET_CONFIG_PRESSURE:
		retval = slave_get_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			slave[EXT_SLAVE_TYPE_PRESSURE],
			pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_SUSPEND:
		retval = inv_mpu_suspend(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			arg);
		break;
	case MPU_RESUME:
		retval = inv_mpu_resume(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			arg);
		break;
	case MPU_PM_EVENT_HANDLED:
		dev_dbg(&client->adapter->dev, "%s: %d\n", __func__, cmd);
		complete(&mpu->completion);
		break;
	case MPU_READ_ACCEL:
		retval = inv_slave_read(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave[EXT_SLAVE_TYPE_ACCEL],
			pdata_slave[EXT_SLAVE_TYPE_ACCEL],
			(unsigned char __user *)arg);
		break;
	case MPU_READ_COMPASS:
		retval = inv_slave_read(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave[EXT_SLAVE_TYPE_COMPASS],
			pdata_slave[EXT_SLAVE_TYPE_COMPASS],
			(unsigned char __user *)arg);
		break;
	case MPU_READ_PRESSURE:
		retval = inv_slave_read(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			slave[EXT_SLAVE_TYPE_PRESSURE],
			pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
			(unsigned char __user *)arg);
		break;
	case MPU_GET_REQUESTED_SENSORS:
		if (copy_to_user(
			   (__u32 __user *)arg,
			   &mldl_cfg->inv_mpu_cfg->requested_sensors,
			   sizeof(mldl_cfg->inv_mpu_cfg->requested_sensors)))
			retval = -EFAULT;
		break;
	case MPU_SET_REQUESTED_SENSORS:
		mldl_cfg->inv_mpu_cfg->requested_sensors = arg;
		break;
	case MPU_GET_IGNORE_SYSTEM_SUSPEND:
		if (copy_to_user(
			(unsigned char __user *)arg,
			&mldl_cfg->inv_mpu_cfg->ignore_system_suspend,
			sizeof(mldl_cfg->inv_mpu_cfg->ignore_system_suspend)))
			retval = -EFAULT;
		break;
	case MPU_SET_IGNORE_SYSTEM_SUSPEND:
		mldl_cfg->inv_mpu_cfg->ignore_system_suspend = arg;
		break;
	case MPU_GET_MLDL_STATUS:
		if (copy_to_user(
			(unsigned char __user *)arg,
			&mldl_cfg->inv_mpu_state->status,
			sizeof(mldl_cfg->inv_mpu_state->status)))
			retval = -EFAULT;
		break;
	case MPU_GET_I2C_SLAVES_ENABLED:
		if (copy_to_user(
			(unsigned char __user *)arg,
			&mldl_cfg->inv_mpu_state->i2c_slaves_enabled,
			sizeof(mldl_cfg->inv_mpu_state->i2c_slaves_enabled)))
			retval = -EFAULT;
		break;
	default:
		dev_err(&client->adapter->dev,
			"%s: Unknown cmd %x, arg %lu\n",
			__func__, cmd, arg);
		retval = -EINVAL;
	}

	mutex_unlock(&mpu->mutex);
	dev_dbg(&client->adapter->dev, "%s: %08x, %08lx, %d\n",
		__func__, cmd, arg, retval);

	if (retval > 0)
		retval = -retval;

	return retval;
}
static int mpq_dmx_tspp_thread(void *arg)
{
	int tsif = (int)arg;
	struct mpq_demux *mpq_demux;
	const struct tspp_data_descriptor *tspp_data_desc;
	atomic_t *data_cnt;
	u32 notif_size;
	int channel_id;
	int ref_count;
	int ret;

	do {
		ret = wait_event_interruptible(
			mpq_dmx_tspp_info.tsif[tsif].wait_queue,
			atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) ||
			kthread_should_stop());

		if ((ret < 0) || kthread_should_stop()) {
			MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
			break;
		}

		/* take the TSIF mutex before touching shared channel state */
		if (mutex_lock_interruptible(
			&mpq_dmx_tspp_info.tsif[tsif].mutex))
			return -ERESTARTSYS;

		channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);

		ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
		data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;

		/* channel was closed while the thread was waiting */
		if (ref_count == 0) {
			mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
			continue;
		}

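		/* consume one pending data notification */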
		atomic_dec(data_cnt);

		mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
		mpq_demux->hw_notification_size = 0;

		if (MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC != allocation_mode &&
			mpq_sdmx_is_loaded())
			pr_err_once(
				"%s: TSPP Allocation mode does not support secure demux.\n",
				__func__);

		if (MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC == allocation_mode &&
			mpq_sdmx_is_loaded()) {
			mpq_dmx_tspp_aggregated_process(tsif, channel_id);
		} else {
			while ((tspp_data_desc = tspp_get_buffer(0, channel_id))
					!= NULL) {
				notif_size = tspp_data_desc->size /
					TSPP_RAW_TTS_SIZE;
				mpq_demux->hw_notification_size += notif_size;

				mpq_dmx_tspp_swfilter_desc(mpq_demux,
					tspp_data_desc);
				tspp_release_buffer(0, channel_id,
					tspp_data_desc->id);
			}
		}

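		/* track the smallest non-zero notification size seen so far */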
		if (mpq_demux->hw_notification_size &&
			(mpq_demux->hw_notification_size <
			mpq_demux->hw_notification_min_size))
			mpq_demux->hw_notification_min_size =
				mpq_demux->hw_notification_size;

		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
	} while (1);

	return 0;
}
/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, const unsigned long *ids)
{
	int ret = 0;
	int cnt = 0;
	unsigned int i;
	struct nvmap_handle **h = (struct nvmap_handle **)ids;
	struct nvmap_handle_ref *ref;

	/* to optimize for the common case (client provided valid handle
	 * references and the pin succeeds), increment the handle_ref pin
	 * count during validation. in error cases, the tree will need to
	 * be re-walked, since the handle_ref is discarded so that an
	 * allocation isn't required. if a handle_ref is not found,
	 * locally validate that the caller has permission to pin the handle;
	 * handle_refs are not created in this case, so it is possible that
	 * if the caller crashes after pinning a global handle, the handle
	 * will be permanently leaked. */
	nvmap_ref_lock(client);
	for (i = 0; i < nr && !ret; i++) {
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			atomic_inc(&ref->pin);
			nvmap_handle_get(h[i]);
		} else {
			struct nvmap_handle *verify;
			nvmap_ref_unlock(client);
			verify = nvmap_validate_get(client, ids[i]);
			if (verify)
				nvmap_warn(client, "%s pinning unreferenced "
					   "handle %p\n",
					   current->group_leader->comm, h[i]);
			else
				ret = -EPERM;
			nvmap_ref_lock(client);
		}
	}
	nvmap_ref_unlock(client);

	nr = i;

	if (ret)
		goto out;

	ret = mutex_lock_interruptible(&client->share->pin_lock);
	if (WARN_ON(ret))
		goto out;

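	/* pin each handle; wait_pin_locked may sleep and can be interrupted */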
	for (cnt = 0; cnt < nr && !ret; cnt++) {
		ret = wait_pin_locked(client, h[cnt]);
	}
	mutex_unlock(&client->share->pin_lock);

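	/* a wait was interrupted: unpin everything pinned so far and report -EINTR */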
	if (ret) {
		int do_wake = 0;

		for (i = 0; i < cnt; i++)
			do_wake |= handle_unpin(client, h[i]);

		if (do_wake)
			wake_up(&client->share->pin_wait);

		ret = -EINTR;
	} else {
		for (i = 0; i < nr; i++) {
			if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
				map_iovmm_area(h[i]);
		}
	}

out:
	if (ret) {
		nvmap_ref_lock(client);
		for (i = 0; i < nr; i++) {
			ref = _nvmap_validate_id_locked(client, ids[i]);
			if (!ref) {
				nvmap_warn(client, "%s freed handle %p "
					   "during pinning\n",
					   current->group_leader->comm,
					   (void *)ids[i]);
				continue;
			}
			atomic_dec(&ref->pin);
		}
		nvmap_ref_unlock(client);

		for (i = cnt; i < nr; i++)
			nvmap_handle_put(h[i]);
	}

	return ret;
}
static int mpq_tspp_dmx_add_channel(struct dvb_demux_feed *feed)
{
	struct mpq_demux *mpq_demux = feed->demux->priv;
	struct tspp_select_source tspp_source;
	struct tspp_filter tspp_filter;
	int tsif;
	int ret = 0;
	int slot;
	int channel_id;
	int *channel_ref_count;
	u32 buffer_size;
	int restore_user_filters = 0;
	int remove_accept_all_filter = 0;
	int remove_null_blocking_filters = 0;

	tspp_source.clk_inverse = clock_inv;
	tspp_source.data_inverse = 0;
	tspp_source.sync_inverse = 0;
	tspp_source.enable_inverse = 0;

	MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);

	switch (tsif_mode) {
	case 1:
		tspp_source.mode = TSPP_TSIF_MODE_1;
		break;
	case 2:
		tspp_source.mode = TSPP_TSIF_MODE_2;
		break;
	default:
		tspp_source.mode = TSPP_TSIF_MODE_LOOPBACK;
		break;
	}

	/* map the demux source to a TSIF instance and TSPP source */
	if (mpq_demux->source == DMX_SOURCE_FRONT0) {
		tsif = 0;
		tspp_source.source = TSPP_SOURCE_TSIF0;
	} else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
		tsif = 1;
		tspp_source.source = TSPP_SOURCE_TSIF1;
	} else {
		MPQ_DVB_ERR_PRINT(
			"%s: invalid input source (%d)\n",
			__func__,
			mpq_demux->source);

		return -EINVAL;
	}

	if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex))
		return -ERESTARTSYS;

	slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
	if (slot >= 0) {
		/* PID already has a filter - just increase its reference count */
		mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
		return 0;
	}

	channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
	channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
	buffer_size = TSPP_DESCRIPTOR_SIZE;

	/* first feed on this TSIF - open and configure the TSPP channel */
	if (*channel_ref_count == 0) {
		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
			ret = mpq_dmx_channel_mem_alloc(tsif);
			if (ret < 0) {
				MPQ_DVB_ERR_PRINT(
					"%s: mpq_dmx_channel_mem_alloc(%d) failed (%d)\n",
					__func__,
					channel_id,
					ret);

				goto add_channel_failed;
			}
		}

		ret = tspp_open_channel(0, channel_id);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: tspp_open_channel(%d) failed (%d)\n",
				__func__,
				channel_id,
				ret);

			goto add_channel_failed;
		}

		/* attach the TSIF input to the newly opened channel */
		ret = tspp_open_stream(0, channel_id, &tspp_source);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: tspp_select_source(%d,%d) failed (%d)\n",
				__func__,
				channel_id,
				tspp_source.source,
				ret);

			goto add_channel_close_ch;
		}

		/* register for data notifications on this channel */
		tspp_register_notification(0,
					   channel_id,
					   mpq_tspp_callback,
					   (void *)tsif,
					   tspp_channel_timeout);

		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
			ret = tspp_allocate_buffers(0, channel_id,
				   mpq_dmx_tspp_info.tsif[tsif].buffer_count,
				   buffer_size, tspp_notification_size,
				   tspp_mem_allocator, tspp_mem_free, NULL);
		} else {
			ret = tspp_allocate_buffers(0, channel_id,
				   mpq_dmx_tspp_info.tsif[tsif].buffer_count,
				   buffer_size, tspp_notification_size,
				   NULL, NULL, NULL);
		}
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: tspp_allocate_buffers(%d) failed (%d)\n",
				__func__,
				channel_id,
				ret);

			goto add_channel_unregister_notif;
		}

		mpq_dmx_tspp_info.tsif[tsif].mpq_demux = mpq_demux;
	}

	/* allocate a filter slot for the new PID */
	slot = mpq_tspp_get_free_filter_slot(tsif);
	if (slot < 0) {
		MPQ_DVB_ERR_PRINT(
			"%s: mpq_tspp_get_free_filter_slot(%d) failed\n",
			__func__, tsif);

		goto add_channel_unregister_notif;
	}

	if (feed->pid == TSPP_PASS_THROUGH_PID)
		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
	else if (feed->pid == TSPP_NULL_PACKETS_PID)
		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;

	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;

	tspp_filter.priority = -1;

	if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <
					TSPP_MAX_HW_PID_FILTER_NUM) {
		/* HW filtering mode: add a TSPP HW filter for this PID */
		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
		if (tspp_filter.priority < 0)
			goto add_channel_free_filter_slot;

		if (feed->pid == TSPP_PASS_THROUGH_PID) {
			/* pass-through PID: accept all PIDs (mask = 0) */
			tspp_filter.pid = 0;
			tspp_filter.mask = 0;
		} else {
			tspp_filter.pid = feed->pid;
			tspp_filter.mask = TSPP_PID_MASK;
		}

		tspp_filter.mode = TSPP_MODE_RAW;
		tspp_filter.source = tspp_source.source;
		tspp_filter.decrypt = 0;
		ret = tspp_add_filter(0, channel_id, &tspp_filter);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: tspp_add_filter(%d) failed (%d)\n",
				__func__,
				channel_id,
				ret);

			goto add_channel_free_filter_slot;
		}
		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
			tspp_filter.priority;

		MPQ_DVB_DBG_PRINT(
			"%s: HW filtering mode: added TSPP HW filter, PID = %d, mask = 0x%X, index = %d\n",
			__func__, tspp_filter.pid, tspp_filter.mask,
			tspp_filter.priority);
	} else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
					TSPP_MAX_HW_PID_FILTER_NUM) {
		/*
		 * Crossing from HW to SW filtering mode:
		 * the HW PID filters can no longer hold all requested PIDs.
		 */

		/* accept all packets first so no data is lost during the switch */
		ret = mpq_tspp_add_accept_all_filter(channel_id,
					tspp_source.source);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
				__func__, channel_id, tspp_source.source);

			goto add_channel_free_filter_slot;
		}

		/* remove the per-PID HW filters now that filtering moves to SW */
		ret = mpq_tspp_remove_all_user_filters(channel_id,
					tspp_source.source);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: mpq_tspp_remove_all_user_filters(%d, %d) failed\n",
				__func__, channel_id, tspp_source.source);

			restore_user_filters = 1;
			remove_accept_all_filter = 1;

			goto add_channel_free_filter_slot;
		}

		/* block NULL packets at the TSPP level */
		ret = mpq_tspp_add_null_blocking_filters(channel_id,
					tspp_source.source);
		if (ret < 0) {
			MPQ_DVB_ERR_PRINT(
				"%s: mpq_tspp_add_null_blocking_filters(%d, %d) failed\n",
				__func__, channel_id, tspp_source.source);

			restore_user_filters = 1;
			remove_accept_all_filter = 1;

			goto add_channel_free_filter_slot;
		}

		/* keep the accept-all filter only if pass-all or pass-nulls was requested */
		if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
			(mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {

			ret = mpq_tspp_remove_accept_all_filter(channel_id,
						tspp_source.source);
			if (ret < 0) {
				MPQ_DVB_ERR_PRINT(
					"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
					__func__, channel_id,
					tspp_source.source);

				remove_null_blocking_filters = 1;
				restore_user_filters = 1;
				remove_accept_all_filter = 1;

				goto add_channel_free_filter_slot;
			}
		}
	} else {
		/* already in SW filtering mode; accept-all is needed only for pass-all/pass-nulls */
		if (mpq_dmx_tspp_info.tsif[tsif].pass_all_flag ||
			mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag) {

			ret = mpq_tspp_add_accept_all_filter(channel_id,
						tspp_source.source);
			if (ret < 0) {
				MPQ_DVB_ERR_PRINT(
					"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
					__func__, channel_id,
					tspp_source.source);

				goto add_channel_free_filter_slot;
			}
		}
	}

	(*channel_ref_count)++;
	mpq_dmx_tspp_info.tsif[tsif].current_filter_count++;

	MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
		__func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);

	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
	return 0;

add_channel_free_filter_slot:
	/* release the filter slot taken for this feed */
	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;

	/* release the HW filter index if one was allocated */
	if (tspp_filter.priority >= 0) {
		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
		mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
	}

	/* roll back any partial HW-to-SW filtering transition */
	if (remove_null_blocking_filters)
		mpq_tspp_remove_null_blocking_filters(channel_id,
						tspp_source.source);

	if (restore_user_filters)
		mpq_tspp_add_all_user_filters(channel_id, tspp_source.source);

	if (remove_accept_all_filter)
		mpq_tspp_remove_accept_all_filter(channel_id,
						tspp_source.source);

	/* undo the pass-all / pass-nulls flag update */
	if (feed->pid == TSPP_PASS_THROUGH_PID)
		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
	else if (feed->pid == TSPP_NULL_PACKETS_PID)
		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;

add_channel_unregister_notif:
	if (*channel_ref_count == 0) {
		tspp_unregister_notification(0, channel_id);
		tspp_close_stream(0, channel_id);
	}
add_channel_close_ch:
	if (*channel_ref_count == 0)
		tspp_close_channel(0, channel_id);
add_channel_failed:
	if (*channel_ref_count == 0)
		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC)
			mpq_dmx_channel_mem_free(tsif);

	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
	return ret;
}
Example #25
0
static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
			      unsigned int cmd, void *parg)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	struct dmxdev *dmxdev = dmxdevfilter->dev;
	unsigned long arg = (unsigned long)parg;
	int ret = 0;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case DMX_START:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		if (dmxdevfilter->state < DMXDEV_STATE_SET)
			ret = -EINVAL;
		else
			ret = dvb_dmxdev_filter_start(dmxdevfilter);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_STOP:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_filter_stop(dmxdevfilter);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_FILTER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_PES_FILTER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_BUFFER_SIZE:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_GET_PES_PIDS:
		if (!dmxdev->demux->get_pes_pids) {
			ret = -EINVAL;
			break;
		}
		dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
		break;

	case DMX_GET_CAPS:
		if (!dmxdev->demux->get_caps) {
			ret = -EINVAL;
			break;
		}
		ret = dmxdev->demux->get_caps(dmxdev->demux, parg);
		break;

	case DMX_SET_SOURCE:
		if (!dmxdev->demux->set_source) {
			ret = -EINVAL;
			break;
		}
		ret = dmxdev->demux->set_source(dmxdev->demux, parg);
		break;

	case DMX_GET_STC:
		if (!dmxdev->demux->get_stc) {
			ret = -EINVAL;
			break;
		}
		ret = dmxdev->demux->get_stc(dmxdev->demux,
					     ((struct dmx_stc *)parg)->num,
					     &((struct dmx_stc *)parg)->stc,
					     &((struct dmx_stc *)parg)->base);
		break;

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dmxdev->mutex);
	return ret;
}
Example #26
0
static ssize_t skel_read(struct file *file, char *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = (struct usb_skel *)file->private_data;

	pr_debug("%s ++ (%d)\n", __func__, current->pid);

	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0) {
		err("%s: failed to get mutex", __func__);
		return rv;
	}

	if (!dev->interface) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_for_completion_interruptible
				(&dev->bulk_in_completion);
		if (rv < 0)
			goto exit;
		/*
		 * by waiting we also semiprocessed the urb
		 * we must finish now
		 */
		dev->bulk_in_copied = 0;
	}

	rv = skel_do_read_io(dev, dev->bulk_in_size);
	if (rv < 0) {
		err("%s - failed submitting read urb, error %d", __func__, rv);
		rv = 0;
		goto exit;
	}
	init_completion(&dev->bulk_in_completion);

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* no data to deliver */
		dev->bulk_in_filled = 0;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */

	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			rv = 0;
			goto exit;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */

		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;

		dev->bulk_in_copied += chunk;

		if (dev->bulk_in_filled == dev->bulk_in_copied) {
			dev->bulk_in_filled = 0;
			dev->bulk_in_copied = 0;
		}

	} else {
		rv = 0;
		goto exit;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	pr_debug("%s -- ret=%d\n", __func__, rv);
	return rv;
}
Example #27
0
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	int rv, cntr;
	int i = 0;
	struct wdm_device *desc = file->private_data;


	rv = mutex_lock_interruptible(&desc->rlock); /* concurrent reads */
	if (rv < 0)
		return -ERESTARTSYS;

	cntr = ACCESS_ONCE(desc->length);
	if (cntr == 0) {
		desc->read = 0;
retry:
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		if (test_bit(WDM_OVERFLOW, &desc->flags)) {
			clear_bit(WDM_OVERFLOW, &desc->flags);
			rv = -ENOBUFS;
			goto err;
		}
		i++;
		if (file->f_flags & O_NONBLOCK) {
			if (!test_bit(WDM_READ, &desc->flags)) {
				rv = cntr ? cntr : -EAGAIN;
				goto err;
			}
			rv = 0;
		} else {
			rv = wait_event_interruptible(desc->wait,
				test_bit(WDM_READ, &desc->flags));
		}

		/* may have happened while we slept */
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		usb_mark_last_busy(interface_to_usbdev(desc->intf));
		if (rv < 0) {
			rv = -ERESTARTSYS;
			goto err;
		}

		spin_lock_irq(&desc->iuspin);

		if (desc->rerr) { /* read completed, error happened */
			desc->rerr = 0;
			spin_unlock_irq(&desc->iuspin);
			rv = -EIO;
			goto err;
		}
		/*
		 * recheck whether we've lost the race
		 * against the completion handler
		 */
		if (!test_bit(WDM_READ, &desc->flags)) { /* lost race */
			spin_unlock_irq(&desc->iuspin);
			goto retry;
		}

		if (!desc->reslength) { /* zero length read */
			dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
			clear_bit(WDM_READ, &desc->flags);
			spin_unlock_irq(&desc->iuspin);
			goto retry;
		}
		cntr = desc->length;
		spin_unlock_irq(&desc->iuspin);
	}

	if (cntr > count)
		cntr = count;
	rv = copy_to_user(buffer, desc->ubuf, cntr);
	if (rv > 0) {
		rv = -EFAULT;
		goto err;
	}

	spin_lock_irq(&desc->iuspin);

	for (i = 0; i < desc->length - cntr; i++)
		desc->ubuf[i] = desc->ubuf[i + cntr];

	desc->length -= cntr;
	/* in case we had outstanding data */
	if (!desc->length)
		clear_bit(WDM_READ, &desc->flags);

	spin_unlock_irq(&desc->iuspin);

	rv = cntr;

err:
	mutex_unlock(&desc->rlock);
	return rv;
}
Example #28
0
/* I2C */
static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
	int num)
{
	int ret;
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	struct rtl28xxu_dev *dev = d->priv;
	struct rtl28xxu_req req;

	/*
	 * The real I2C bus transfer limits are not known, but testing
	 * with RTL2831U + MT2060 gives max RD 24 and max WR 22 bytes.
	 * TODO: find out the RTL2832U limits
	 */

	/*
	 * The I2C adapter logic looks rather complicated because it handles
	 * three different access methods:
	 * 1) integrated demod access
	 * 2) old I2C access
	 * 3) new I2C access
	 *
	 * The method is selected in the order 1, 2, 3. Method 3 can handle all
	 * requests, but there are two reasons not to use it always:
	 * 1) it is the most expensive, usually two USB messages are needed
	 * 2) at least the RTL2831U does not support it
	 *
	 * Method 3 is needed for an I2C write+read (a typical register read)
	 * where the write is more than one byte.
	 */

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;

	if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
		(msg[1].flags & I2C_M_RD)) {
		if (msg[0].len > 24 || msg[1].len > 24) {
			/* TODO: check msg[0].len max */
			ret = -EOPNOTSUPP;
			goto err_mutex_unlock;
		} else if (msg[0].addr == 0x10) {
			/* method 1 - integrated demod */
			req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
			req.index = CMD_DEMOD_RD | dev->page;
			req.size = msg[1].len;
			req.data = &msg[1].buf[0];
			ret = rtl28xxu_ctrl_msg(d, &req);
		} else if (msg[0].len < 2) {
			/* method 2 - old I2C */
			req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
			req.index = CMD_I2C_RD;
			req.size = msg[1].len;
			req.data = &msg[1].buf[0];
			ret = rtl28xxu_ctrl_msg(d, &req);
		} else {
			/* method 3 - new I2C */
			req.value = (msg[0].addr << 1);
			req.index = CMD_I2C_DA_WR;
			req.size = msg[0].len;
			req.data = msg[0].buf;
			ret = rtl28xxu_ctrl_msg(d, &req);
			if (ret)
				goto err_mutex_unlock;

			req.value = (msg[0].addr << 1);
			req.index = CMD_I2C_DA_RD;
			req.size = msg[1].len;
			req.data = msg[1].buf;
			ret = rtl28xxu_ctrl_msg(d, &req);
		}
	} else if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
		if (msg[0].len > 22) {
			/* TODO: check msg[0].len max */
			ret = -EOPNOTSUPP;
			goto err_mutex_unlock;
		} else if (msg[0].addr == 0x10) {
			/* method 1 - integrated demod */
			if (msg[0].buf[0] == 0x00) {
				/* save demod page for later demod access */
				dev->page = msg[0].buf[1];
				ret = 0;
			} else {
				req.value = (msg[0].buf[0] << 8) |
					(msg[0].addr << 1);
				req.index = CMD_DEMOD_WR | dev->page;
				req.size = msg[0].len-1;
				req.data = &msg[0].buf[1];
				ret = rtl28xxu_ctrl_msg(d, &req);
			}
		} else if (msg[0].len < 23) {
			/* method 2 - old I2C */
			req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
			req.index = CMD_I2C_WR;
			req.size = msg[0].len-1;
			req.data = &msg[0].buf[1];
			ret = rtl28xxu_ctrl_msg(d, &req);
		} else {
			/* method 3 - new I2C */
			req.value = (msg[0].addr << 1);
			req.index = CMD_I2C_DA_WR;
			req.size = msg[0].len;
			req.data = msg[0].buf;
			ret = rtl28xxu_ctrl_msg(d, &req);
		}
	} else {
		ret = -EINVAL;
	}

err_mutex_unlock:
	mutex_unlock(&d->i2c_mutex);

	return ret ? ret : num;
}
Example #29
0
static int ptrace_attach(struct task_struct *task)
{
	bool wait_trap = false;
	int retval;

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = PT_PTRACED;
	if (task_ns_capable(task, CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set GROUP_STOP_PENDING and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task)) {
		task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
		signal_wake_up(task, 1);
		wait_trap = true;
	}

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (wait_trap)
		wait_event(current->signal->wait_chldexit,
			   !(task->group_stop & GROUP_STOP_TRAPPING));
	return retval;
}
static long mt9d115_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct mt9d115_info *info = file->private_data;
	struct mt9d115_mode mode;
	struct mt9d115_reg *config;
	struct mt9d115_iso *iso;
	int ret, et;

	ret = mutex_lock_interruptible(&info->lock);
	if (ret)
		return ret;

	switch (cmd) {
	case MT9D115_IOCTL_SET_MODE:
		if (copy_from_user(&mode, (const void __user *)arg,
				   sizeof(struct mt9d115_mode))) {
			ret = -EFAULT;
			break;
		}

		ret = mt9d115_set_mode(info, &mode);
		break;

	case MT9D115_IOCTL_SET_COLOR_EFFECT:
		switch ((unsigned int)arg & MT9D115_COLOR_EFFECT_MASK) {
		case MT9D115_COLOR_EFFECT_NONE:
			config = color_effect_none;
			break;

		case MT9D115_COLOR_EFFECT_MONO:
			config = color_effect_mono;
			break;

		case MT9D115_COLOR_EFFECT_SEPIA:
			config = color_effect_sepia;
			break;

		case MT9D115_COLOR_EFFECT_NEGATIVE:
			config = color_effect_negative;
			break;

		case MT9D115_COLOR_EFFECT_SOLARIZE:
			config = color_effect_solarize;
			break;

		default:
			config = NULL;
			break;
		}

		if (config) {
			if (mode_table_status == MT9D115_MODE_UNINITED)
				config_table[MT9D115_COLOR_EFFECT_CONFIG] = config;
			else
				ret = mt9d115_write_table(info->i2c_client,
							  config);
		} else {
			ret = -EINVAL;
		}

		break;

	case MT9D115_IOCTL_SET_WHITE_BALANCE:
		switch ((unsigned int)arg) {
		case MT9D115_WHITE_BALANCE_AUTO:
			config = white_balance_auto;
			break;

		case MT9D115_WHITE_BALANCE_INCANDESCENT:
			config = white_balance_incandescent;
			break;

		case MT9D115_WHITE_BALANCE_DAYLIGHT:
			config = white_balance_daylight;
			break;

		case MT9D115_WHITE_BALANCE_FLUORESCENT:
			config = white_balance_fluorescent;
			break;

		case MT9D115_WHITE_BALANCE_CLOUDY:
			config = white_balance_cloudy;
			break;

		default:
			config = NULL;
			break;
		}

		if (config) {
			if (mode_table_status == MT9D115_MODE_UNINITED)
				config_table[MT9D115_WHITE_BALANCE_CONFIG] = config;
			else
				ret = mt9d115_write_table(info->i2c_client,
							  config);
		} else {
			ret = -EINVAL;
		}

		break;

	case MT9D115_IOCTL_SET_EXPOSURE:
		switch ((int)arg) {
		case MT9D115_EXPOSURE_0:
			ret = mt9d115_write_table(info->i2c_client, exposure_0);
			break;

		case MT9D115_EXPOSURE_PLUS_1:
			ret = mt9d115_write_table(info->i2c_client,
						  exposure_plus_1);
			break;

		case MT9D115_EXPOSURE_PLUS_2:
			ret = mt9d115_write_table(info->i2c_client,
						  exposure_plus_2);
			break;

		case MT9D115_EXPOSURE_MINUS_1:
			ret = mt9d115_write_table(info->i2c_client,
						  exposure_minus_1);
			break;

		case MT9D115_EXPOSURE_MINUS_2:
			ret = mt9d115_write_table(info->i2c_client,
						  exposure_minus_2);
			break;

		default:
			ret = -EINVAL;
			break;
		}

		break;

	case MT9D115_IOCTL_GET_ISO:
		mt9d115_write_reg(info->i2c_client, 0x098C, AE_VIRT_GAIN_ADDR);
		ret = mt9d115_read_reg(info->i2c_client, 0x0990);
		if (ret < 0)
			break;

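		/* linearly interpolate the ISO value from the analog gain table */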
		for (iso = iso_table + 1; iso->value >= 0; iso++) {
			struct mt9d115_iso *pre_iso;

			if (iso->again < ret)
				continue;

			pre_iso = iso - 1;
			ret = (iso->value - pre_iso->value) *
				(ret - pre_iso->again) /
				(iso->again - pre_iso->again) + pre_iso->value;
			break;
		}

		break;

	case MT9D115_IOCTL_GET_EXPOSURE_TIME:
		mt9d115_write_reg(info->i2c_client, 0x098C, AE_R9_ADDR);
		ret = mt9d115_read_reg(info->i2c_client, 0x0990);
		if (ret < 0)
			break;

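		/* combine coarse and fine integration time readings into an exposure time */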
		et = ret * 625;
		ret = mt9d115_read_reg(info->i2c_client, LINE_LENGTH_PCK_ADDR);
		if (ret < 0)
			break;

		et = ret * et;
		ret = mt9d115_read_reg(info->i2c_client,
				       FINE_INTEGRATION_TIME_ADDR);
		if (ret < 0)
			break;

		ret = (ret + et) / OUTPUT_CLK;
		break;

	default:
		ret = -EINVAL;
	}

	mutex_unlock(&info->lock);
	return ret;
}