static ssize_t get_idac_debugfs_read(struct file *filp, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
	struct cyttsp5_device_access_data *dad = data->dad;
	struct device *dev = dad->dev;
	int status = STATUS_FAIL;
	u8 cmd_status = 0;
	u8 data_format = 0;
	u16 act_length = 0;
	int length = 0;
	int rc;

	if (*ppos)
		goto exit;

	mutex_lock(&dad->sysfs_lock);

	pm_runtime_get_sync(dev);

	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
	if (rc < 0) {
		dev_err(dev, "%s: Error on request exclusive r=%d\n",
				__func__, rc);
		goto put_pm_runtime;
	}

	rc = cyttsp5_suspend_scan_cmd_(dev);
	if (rc < 0) {
		dev_err(dev, "%s: Error on suspend scan r=%d\n",
				__func__, rc);
		goto release_exclusive;
	}

	rc = cyttsp5_get_data_structure_cmd_(dev, 0, PIP_CMD_MAX_LENGTH,
			dad->get_idac_data_id, &cmd_status, &data_format,
			&act_length, &dad->ic_buf[5]);
	if (rc < 0) {
		dev_err(dev, "%s: Error on get data structure r=%d\n",
				__func__, rc);
		goto resume_scan;
	}

	dad->ic_buf[0] = cmd_status;
	dad->ic_buf[1] = dad->get_idac_data_id;
	dad->ic_buf[2] = LOW_BYTE(act_length);
	dad->ic_buf[3] = HI_BYTE(act_length);
	dad->ic_buf[4] = data_format;

	length = 5 + act_length;

	status = STATUS_SUCCESS;

resume_scan:
	cyttsp5_resume_scan_cmd_(dev);

release_exclusive:
	cmd->release_exclusive(dev);

put_pm_runtime:
	pm_runtime_put(dev);

	if (status == STATUS_FAIL)
		length = 0;

	data->pr_buf_len = prepare_print_buffer(status, dad->ic_buf, length,
			data->pr_buf, sizeof(data->pr_buf));

	mutex_unlock(&dad->sysfs_lock);

exit:
	return simple_read_from_buffer(buf, count, ppos, data->pr_buf,
			data->pr_buf_len);
}
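A minimal sketch of the acquire/release bracket used above: each resource is taken in order and released in reverse through the goto labels, so every failure path unwinds exactly what was acquired. All my_* names are illustrative, not part of the driver above.

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>

static int my_locked_transaction(struct device *dev, struct mutex *lock,
				 int (*take_hw)(struct device *),
				 void (*release_hw)(struct device *),
				 int (*do_io)(struct device *))
{
	int ret;

	mutex_lock(lock);
	pm_runtime_get_sync(dev);	/* wake the device */

	ret = take_hw(dev);		/* e.g. exclusive access */
	if (ret < 0)
		goto put_pm_runtime;

	ret = do_io(dev);		/* the actual transaction */

	release_hw(dev);
put_pm_runtime:
	pm_runtime_put(dev);		/* balance the get above */
	mutex_unlock(lock);
	return ret;
}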
Example #2
static int dwc3_omap_probe(struct platform_device *pdev)
{
	struct device_node	*node = pdev->dev.of_node;

	struct dwc3_omap	*omap;
	struct resource		*res;
	struct device		*dev = &pdev->dev;
	struct regulator	*vbus_reg = NULL;

	int			ret;
	int			irq;

	u32			reg;

	void __iomem		*base;

	if (!node) {
		dev_err(dev, "device node not found\n");
		return -EINVAL;
	}

	omap = devm_kzalloc(dev, sizeof(*omap), GFP_KERNEL);
	if (!omap) {
		dev_err(dev, "not enough memory\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, omap);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (of_property_read_bool(node, "vbus-supply")) {
		vbus_reg = devm_regulator_get(dev, "vbus");
		if (IS_ERR(vbus_reg)) {
			dev_err(dev, "vbus init failed\n");
			return PTR_ERR(vbus_reg);
		}
	}

	omap->dev	= dev;
	omap->irq	= irq;
	omap->base	= base;
	omap->vbus_reg	= vbus_reg;
	dev->dma_mask	= &dwc3_omap_dma_mask;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "get_sync failed with err %d\n", ret);
		goto err0;
	}

	dwc3_omap_map_offset(omap);
	dwc3_omap_set_utmi_mode(omap);

	/* check the DMA Status */
	reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
	omap->dma_status = !!(reg & USBOTGSS_SYSCONFIG_DMADISABLE);

	ret = devm_request_irq(dev, omap->irq, dwc3_omap_interrupt, 0,
			"dwc3-omap", omap);
	if (ret) {
		dev_err(dev, "failed to request IRQ #%d --> %d\n",
				omap->irq, ret);
		goto err1;
	}

	dwc3_omap_enable_irqs(omap);

	ret = dwc3_omap_extcon_register(omap);
	if (ret < 0)
		goto err2;

	ret = of_platform_populate(node, NULL, NULL, dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to create dwc3 core\n");
		goto err3;
	}

	return 0;

err3:
	if (omap->extcon_vbus_dev.edev)
		extcon_unregister_interest(&omap->extcon_vbus_dev);
	if (omap->extcon_id_dev.edev)
		extcon_unregister_interest(&omap->extcon_id_dev);

err2:
	dwc3_omap_disable_irqs(omap);

err1:
	pm_runtime_put_sync(dev);

err0:
	pm_runtime_disable(dev);

	return ret;
}
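One detail worth noting in probe error paths like the one above: pm_runtime_get_sync() increments the usage counter even when it fails, so strictly balanced code needs a put on that path too. Newer kernels provide pm_runtime_resume_and_get(), which drops the reference itself on failure; a sketch of a probe using it (my_probe is illustrative):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	pm_runtime_enable(dev);

	/* Unlike pm_runtime_get_sync(), this drops the usage count
	 * by itself on failure, so the error path only disables. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		pm_runtime_disable(dev);
		return ret;
	}

	/* ... hardware setup runs while the device is powered ... */

	pm_runtime_put(dev);	/* allow the device to suspend again */
	return 0;
}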
Example #3
static int __devinit ti_tscadc_probe(struct platform_device *pdev)
{
	struct ti_tscadc_dev	*tscadc;
	struct resource		*res;
	struct clk		*clk;
	struct mfd_tscadc_board	*pdata = pdev->dev.platform_data;
	struct mfd_cell		*cell;
	int			err, ctrl, children = 0;
	int			clk_value, clock_rate;
	int			tsc_wires = 0, adc_channels = 0, total_channels;

	if (!pdata) {
		dev_err(&pdev->dev, "Could not find platform data\n");
		return -EINVAL;
	}

	if (pdata->adc_init)
		adc_channels = pdata->adc_init->adc_channels;

	if (pdata->tsc_init)
		tsc_wires = pdata->tsc_init->wires;

	total_channels = tsc_wires + adc_channels;

	if (total_channels > 8) {
		dev_err(&pdev->dev, "Number of i/p channels more than 8\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no memory resource defined.\n");
		return -EINVAL;
	}

	/* Allocate memory for device */
	tscadc = kzalloc(sizeof(struct ti_tscadc_dev), GFP_KERNEL);
	if (!tscadc) {
		dev_err(&pdev->dev, "failed to allocate memory.\n");
		return -ENOMEM;
	}

	res = request_mem_region(res->start, resource_size(res),
			pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "failed to reserve registers.\n");
		err = -EBUSY;
		goto err_free_mem;
	}

	tscadc->tscadc_base = ioremap(res->start, resource_size(res));
	if (!tscadc->tscadc_base) {
		dev_err(&pdev->dev, "failed to map registers.\n");
		err = -ENOMEM;
		goto err_release_mem;
	}

	tscadc->irq = platform_get_irq(pdev, 0);
	if (tscadc->irq < 0) {
		dev_err(&pdev->dev, "no irq ID is specified.\n");
		return -ENODEV;
	}

	tscadc->dev = &pdev->dev;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/*
	 * The TSC_ADC_Subsystem has 2 clock domains
	 * OCP_CLK and ADC_CLK.
	 * The ADC clock is expected to run at a target of 3 MHz,
	 * and expected to capture 12-bit data at a rate of 200 KSPS.
	 * The TSC_ADC_SS controller design assumes the OCP clock is
	 * at least 6x faster than the ADC clock.
	 */
	clk = clk_get(&pdev->dev, "adc_tsc_fck");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get TSC fck\n");
		err = PTR_ERR(clk);
		goto err_fail;
	}
	clock_rate = clk_get_rate(clk);
	clk_put(clk);
	clk_value = clock_rate / ADC_CLK;
	/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
	clk_value = clk_value - 1;
	tscadc_writel(tscadc, TSCADC_REG_CLKDIV, clk_value);

	/* Set the control register bits */
	ctrl = TSCADC_CNTRLREG_STEPCONFIGWRT |
			TSCADC_CNTRLREG_STEPID;
	if (pdata->tsc_init)
		ctrl |= TSCADC_CNTRLREG_4WIRE |
				TSCADC_CNTRLREG_TSCENB;
	tscadc_writel(tscadc, TSCADC_REG_CTRL, ctrl);

	/* Set register bits for Idle Config Mode */
	if (pdata->tsc_init)
		tscadc_idle_config(tscadc);

	/* Enable the TSC module enable bit */
	ctrl = tscadc_readl(tscadc, TSCADC_REG_CTRL);
	ctrl |= TSCADC_CNTRLREG_TSCSSENB;
	tscadc_writel(tscadc, TSCADC_REG_CTRL, ctrl);

	/* TSC Cell */
	if (pdata->tsc_init) {
		cell = &tscadc->cells[children];
		cell->name = "tsc";
		cell->platform_data = tscadc;
		cell->pdata_size = sizeof(*tscadc);
		children++;
	}

	/* ADC Cell */
	if (pdata->adc_init) {
		cell = &tscadc->cells[children];
		cell->name = "tiadc";
		cell->platform_data = tscadc;
		cell->pdata_size = sizeof(*tscadc);
		children++;
	}

	err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
			children, NULL, 0);
	if (err < 0)
		goto err_fail;

	device_init_wakeup(&pdev->dev, true);
	platform_set_drvdata(pdev, tscadc);
	return 0;

err_fail:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	iounmap(tscadc->tscadc_base);
err_release_mem:
	release_mem_region(res->start, resource_size(res));
	mfd_remove_devices(tscadc->dev);
err_free_mem:
	platform_set_drvdata(pdev, NULL);
	kfree(tscadc);
	return err;
}
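A quick worked example of the divider math above, with an assumed input rate: if adc_tsc_fck runs at 24 MHz and ADC_CLK is 3000000, then clk_value = 24000000 / 3000000 - 1 = 7, and the divided ADC clock comes out at 24 MHz / (7 + 1) = 3 MHz, matching the 3 MHz target from the comment.

Example #4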
/*
 ****************************************************************************
 * - Device operations such as:
 *   probe, init/exit, remove
 ****************************************************************************
 */
static int __devinit lm3561_probe(struct i2c_client *client,
	  const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct lm3561_platform_data *pdata = client->dev.platform_data;
	struct lm3561_drv_data *data;
	int result;

	dev_dbg(&client->dev, "%s\n", __func__);

	if (!pdata) {
		dev_err(&client->dev,
			"%s(): failed during init",
				__func__);
		return -EINVAL;
	}

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
		dev_err(&client->dev,
			"%s(): failed during i2c_check_functionality",
			__func__);
		return -EIO;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_err(&client->dev, "%s(): failed during kzalloc", __func__);
		return -ENOMEM;
	}

	dev_set_drvdata(&client->dev, data);
	data->client = client;
	data->led_nums = 1;
	data->torch_current_shift = 0;
	data->flash_current_shift = 0;
	if (pdata->current_limit >= 1500000) {
		data->reg_flash_duration_limit =
			LM3561_FLASH_DURATION_CL_1500MA;
	} else if (pdata->current_limit >= 1000000) {
		data->reg_flash_duration_limit =
			LM3561_FLASH_DURATION_CL_1000MA;
	} else {
		/* current_limit < 1000000uA */
		dev_err(&data->client->dev,
			"%s(): current_limit(%luuA) is invalid\n",
			__func__, pdata->current_limit);
		result = -EINVAL;
		goto err_init;
	}
	mutex_init(&data->lock);
	pm_runtime_enable(&client->dev);
	pm_suspend_ignore_children(&client->dev, true);
	result = pm_runtime_get_sync(&client->dev);
	if (result < 0)
		goto err_setup;
	result = lm3561_chip_init(data, pdata);
	if (result) {
		dev_err(&client->dev, "%s:chip init error\n", __func__);
		goto err_chip_init;
	}
	result = lm3561_create_sysfs_interfaces(&client->dev);
	if (result) {
		dev_err(&data->client->dev,
			"%s(): create sysfs failed",
				__func__);
		goto err_chip_init;
	}
	pm_runtime_set_autosuspend_delay(&client->dev, autosuspend_delay_ms);
	pm_runtime_use_autosuspend(&client->dev);
	pm_runtime_mark_last_busy(&data->client->dev);
	pm_runtime_put_autosuspend(&data->client->dev);
	dev_info(&data->client->dev, "%s: loaded\n", __func__);
	return 0;

err_chip_init:
	pm_runtime_suspend(&client->dev);
err_setup:
	pm_runtime_disable(&client->dev);
	if (pdata->platform_init)
		pdata->platform_init(&client->dev, 0);
err_init:
	dev_set_drvdata(&client->dev, NULL);
	kfree(data);
	dev_err(&client->dev,
		"%s: failed with code %d.\n", __func__, result);

	return result;
}
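The tail of the probe above is the standard runtime-PM autosuspend idiom. A condensed sketch of just that sequence (the delay value here is illustrative; the driver above takes it from autosuspend_delay_ms):

#include <linux/pm_runtime.h>

static void my_enable_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* ms of idle before suspend */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_mark_last_busy(dev);		/* restart the idle timer */
	pm_runtime_put_autosuspend(dev);	/* drop the ref; suspend after the delay */
}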
Example #5
static int rvin_reset_format(struct rvin_dev *vin)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = vin->parallel->source_pad,
	};
	int ret;

	ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret;

	v4l2_fill_pix_format(&vin->format, &fmt.format);

	rvin_format_align(vin, &vin->format);

	vin->source.top = 0;
	vin->source.left = 0;
	vin->source.width = vin->format.width;
	vin->source.height = vin->format.height;

	vin->crop = vin->source;
	vin->compose = vin->source;

	return 0;
}

static int rvin_try_format(struct rvin_dev *vin, u32 which,
			   struct v4l2_pix_format *pix,
			   struct v4l2_rect *crop, struct v4l2_rect *compose)
{
	struct v4l2_subdev *sd = vin_to_source(vin);
	struct v4l2_subdev_pad_config *pad_cfg;
	struct v4l2_subdev_format format = {
		.which = which,
		.pad = vin->parallel->source_pad,
	};
	enum v4l2_field field;
	u32 width, height;
	int ret;

	pad_cfg = v4l2_subdev_alloc_pad_config(sd);
	if (pad_cfg == NULL)
		return -ENOMEM;

	if (!rvin_format_from_pixel(pix->pixelformat) ||
	    (vin->info->model == RCAR_M1 &&
	     pix->pixelformat == V4L2_PIX_FMT_XBGR32))
		pix->pixelformat = RVIN_DEFAULT_FORMAT;

	v4l2_fill_mbus_format(&format.format, pix, vin->mbus_code);

	/* Allow the video device to override field and to scale */
	field = pix->field;
	width = pix->width;
	height = pix->height;

	ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto done;
	ret = 0;

	v4l2_fill_pix_format(pix, &format.format);

	if (crop) {
		crop->top = 0;
		crop->left = 0;
		crop->width = pix->width;
		crop->height = pix->height;

		/*
		 * If source is ALTERNATE the driver will use the VIN hardware
		 * to INTERLACE it. The crop height then needs to be doubled.
		 */
		if (pix->field == V4L2_FIELD_ALTERNATE)
			crop->height *= 2;
	}

	if (field != V4L2_FIELD_ANY)
		pix->field = field;

	pix->width = width;
	pix->height = height;

	rvin_format_align(vin, pix);

	if (compose) {
		compose->top = 0;
		compose->left = 0;
		compose->width = pix->width;
		compose->height = pix->height;
	}
done:
	v4l2_subdev_free_pad_config(pad_cfg);

	return ret;
}

static int rvin_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	struct rvin_dev *vin = video_drvdata(file);

	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strscpy(cap->card, "R_Car_VIN", sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 dev_name(vin->dev));
	return 0;
}

static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);

	return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL,
			       NULL);
}

static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_rect crop, compose;
	int ret;

	if (vb2_is_busy(&vin->queue))
		return -EBUSY;

	ret = rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
			      &crop, &compose);
	if (ret)
		return ret;

	vin->format = f->fmt.pix;
	vin->crop = crop;
	vin->compose = compose;
	vin->source = crop;

	return 0;
}

static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);

	f->fmt.pix = vin->format;

	return 0;
}

static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(rvin_formats))
		return -EINVAL;

	f->pixelformat = rvin_formats[f->index].fourcc;

	return 0;
}

static int rvin_g_selection(struct file *file, void *fh,
			    struct v4l2_selection *s)
{
	struct rvin_dev *vin = video_drvdata(file);

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		s->r.left = s->r.top = 0;
		s->r.width = vin->source.width;
		s->r.height = vin->source.height;
		break;
	case V4L2_SEL_TGT_CROP:
		s->r = vin->crop;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		s->r.left = s->r.top = 0;
		s->r.width = vin->format.width;
		s->r.height = vin->format.height;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		s->r = vin->compose;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int rvin_s_selection(struct file *file, void *fh,
			    struct v4l2_selection *s)
{
	struct rvin_dev *vin = video_drvdata(file);
	const struct rvin_video_format *fmt;
	struct v4l2_rect r = s->r;
	struct v4l2_rect max_rect;
	struct v4l2_rect min_rect = {
		.width = 6,
		.height = 2,
	};

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	v4l2_rect_set_min_size(&r, &min_rect);

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		/* Can't crop outside of source input */
		max_rect.top = max_rect.left = 0;
		max_rect.width = vin->source.width;
		max_rect.height = vin->source.height;
		v4l2_rect_map_inside(&r, &max_rect);

		v4l_bound_align_image(&r.width, 6, vin->source.width, 0,
				      &r.height, 2, vin->source.height, 0, 0);

		r.top  = clamp_t(s32, r.top, 0, vin->source.height - r.height);
		r.left = clamp_t(s32, r.left, 0, vin->source.width - r.width);

		vin->crop = s->r = r;

		vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
			r.width, r.height, r.left, r.top,
			vin->source.width, vin->source.height);
		break;
	case V4L2_SEL_TGT_COMPOSE:
		/* Make sure compose rect fits inside output format */
		max_rect.top = max_rect.left = 0;
		max_rect.width = vin->format.width;
		max_rect.height = vin->format.height;
		v4l2_rect_map_inside(&r, &max_rect);

		/*
		 * Composing is done by adding an offset to the buffer address;
		 * the HW wants this address to be aligned to HW_BUFFER_MASK.
		 * Make sure the top and left values meet this requirement.
		 */
		while ((r.top * vin->format.bytesperline) & HW_BUFFER_MASK)
			r.top--;

		fmt = rvin_format_from_pixel(vin->format.pixelformat);
		while ((r.left * fmt->bpp) & HW_BUFFER_MASK)
			r.left--;

		vin->compose = s->r = r;

		vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n",
			r.width, r.height, r.left, r.top,
			vin->format.width, vin->format.height);
		break;
	default:
		return -EINVAL;
	}

	/* HW supports modifying configuration while running */
	rvin_crop_scale_comp(vin);

	return 0;
}

static int rvin_g_pixelaspect(struct file *file, void *priv,
			      int type, struct v4l2_fract *f)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	return v4l2_subdev_call(sd, video, g_pixelaspect, f);
}

static int rvin_enum_input(struct file *file, void *priv,
			   struct v4l2_input *i)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (i->index != 0)
		return -EINVAL;

	ret = v4l2_subdev_call(sd, video, g_input_status, &i->status);
	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
		return ret;

	i->type = V4L2_INPUT_TYPE_CAMERA;

	if (v4l2_subdev_has_op(sd, pad, dv_timings_cap)) {
		i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
		i->std = 0;
	} else {
		i->capabilities = V4L2_IN_CAP_STD;
		i->std = vin->vdev.tvnorms;
	}

	strscpy(i->name, "Camera", sizeof(i->name));

	return 0;
}

static int rvin_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

static int rvin_s_input(struct file *file, void *priv, unsigned int i)
{
	if (i > 0)
		return -EINVAL;
	return 0;
}

static int rvin_querystd(struct file *file, void *priv, v4l2_std_id *a)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	return v4l2_subdev_call(sd, video, querystd, a);
}

static int rvin_s_std(struct file *file, void *priv, v4l2_std_id a)
{
	struct rvin_dev *vin = video_drvdata(file);
	int ret;

	ret = v4l2_subdev_call(vin_to_source(vin), video, s_std, a);
	if (ret < 0)
		return ret;

	vin->std = a;

	/* Changing the standard will change the width/height */
	return rvin_reset_format(vin);
}

static int rvin_g_std(struct file *file, void *priv, v4l2_std_id *a)
{
	struct rvin_dev *vin = video_drvdata(file);

	if (v4l2_subdev_has_op(vin_to_source(vin), pad, dv_timings_cap))
		return -ENOIOCTLCMD;

	*a = vin->std;

	return 0;
}

static int rvin_subscribe_event(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_SOURCE_CHANGE:
		return v4l2_event_subscribe(fh, sub, 4, NULL);
	}
	return v4l2_ctrl_subscribe_event(fh, sub);
}

static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
				struct v4l2_enum_dv_timings *timings)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (timings->pad)
		return -EINVAL;

	timings->pad = vin->parallel->sink_pad;

	ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);

	timings->pad = 0;

	return ret;
}

static int rvin_s_dv_timings(struct file *file, void *priv_fh,
			     struct v4l2_dv_timings *timings)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	ret = v4l2_subdev_call(sd, video, s_dv_timings, timings);
	if (ret)
		return ret;

	/* Changing the timings will change the width/height */
	return rvin_reset_format(vin);
}

static int rvin_g_dv_timings(struct file *file, void *priv_fh,
			     struct v4l2_dv_timings *timings)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	return v4l2_subdev_call(sd, video, g_dv_timings, timings);
}

static int rvin_query_dv_timings(struct file *file, void *priv_fh,
				 struct v4l2_dv_timings *timings)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	return v4l2_subdev_call(sd, video, query_dv_timings, timings);
}

static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
			       struct v4l2_dv_timings_cap *cap)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (cap->pad)
		return -EINVAL;

	cap->pad = vin->parallel->sink_pad;

	ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);

	cap->pad = 0;

	return ret;
}

static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (edid->pad)
		return -EINVAL;

	edid->pad = vin->parallel->sink_pad;

	ret = v4l2_subdev_call(sd, pad, get_edid, edid);

	edid->pad = 0;

	return ret;
}

static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (edid->pad)
		return -EINVAL;

	edid->pad = vin->parallel->sink_pad;

	ret = v4l2_subdev_call(sd, pad, set_edid, edid);

	edid->pad = 0;

	return ret;
}

static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
	.vidioc_querycap		= rvin_querycap,
	.vidioc_try_fmt_vid_cap		= rvin_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= rvin_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= rvin_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap	= rvin_enum_fmt_vid_cap,

	.vidioc_g_selection		= rvin_g_selection,
	.vidioc_s_selection		= rvin_s_selection,

	.vidioc_g_pixelaspect		= rvin_g_pixelaspect,

	.vidioc_enum_input		= rvin_enum_input,
	.vidioc_g_input			= rvin_g_input,
	.vidioc_s_input			= rvin_s_input,

	.vidioc_dv_timings_cap		= rvin_dv_timings_cap,
	.vidioc_enum_dv_timings		= rvin_enum_dv_timings,
	.vidioc_g_dv_timings		= rvin_g_dv_timings,
	.vidioc_s_dv_timings		= rvin_s_dv_timings,
	.vidioc_query_dv_timings	= rvin_query_dv_timings,

	.vidioc_g_edid			= rvin_g_edid,
	.vidioc_s_edid			= rvin_s_edid,

	.vidioc_querystd		= rvin_querystd,
	.vidioc_g_std			= rvin_g_std,
	.vidioc_s_std			= rvin_s_std,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_log_status		= v4l2_ctrl_log_status,
	.vidioc_subscribe_event		= rvin_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};

/* -----------------------------------------------------------------------------
 * V4L2 Media Controller
 */

static void rvin_mc_try_format(struct rvin_dev *vin,
			       struct v4l2_pix_format *pix)
{
	/*
	 * The V4L2 specification clearly documents the colorspace fields
	 * as being set by drivers for capture devices. Using the values
	 * supplied by userspace thus wouldn't comply with the API. Until
	 * the API is updated, force fixed values.
	 */
	pix->colorspace = RVIN_DEFAULT_COLORSPACE;
	pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
	pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
	pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
							  pix->ycbcr_enc);

	rvin_format_align(vin, pix);
}

static int rvin_mc_try_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);

	rvin_mc_try_format(vin, &f->fmt.pix);

	return 0;
}

static int rvin_mc_s_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);

	if (vb2_is_busy(&vin->queue))
		return -EBUSY;

	rvin_mc_try_format(vin, &f->fmt.pix);

	vin->format = f->fmt.pix;

	vin->crop.top = 0;
	vin->crop.left = 0;
	vin->crop.width = vin->format.width;
	vin->crop.height = vin->format.height;
	vin->compose = vin->crop;

	return 0;
}

static int rvin_mc_enum_input(struct file *file, void *priv,
			      struct v4l2_input *i)
{
	if (i->index != 0)
		return -EINVAL;

	i->type = V4L2_INPUT_TYPE_CAMERA;
	strscpy(i->name, "Camera", sizeof(i->name));

	return 0;
}

static const struct v4l2_ioctl_ops rvin_mc_ioctl_ops = {
	.vidioc_querycap		= rvin_querycap,
	.vidioc_try_fmt_vid_cap		= rvin_mc_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= rvin_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= rvin_mc_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap	= rvin_enum_fmt_vid_cap,

	.vidioc_enum_input		= rvin_mc_enum_input,
	.vidioc_g_input			= rvin_g_input,
	.vidioc_s_input			= rvin_s_input,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_log_status		= v4l2_ctrl_log_status,
	.vidioc_subscribe_event		= rvin_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};

/* -----------------------------------------------------------------------------
 * File Operations
 */

static int rvin_power_on(struct rvin_dev *vin)
{
	int ret;
	struct v4l2_subdev *sd = vin_to_source(vin);

	pm_runtime_get_sync(vin->v4l2_dev.dev);

	ret = v4l2_subdev_call(sd, core, s_power, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
		return ret;
	return 0;
}
Example #6
void scsi_autopm_get_target(struct scsi_target *starget)
{
	pm_runtime_get_sync(&starget->dev);
}
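For context, the matching release helper in the same file is the mirror image of this one-liner; a sketch:

#include <scsi/scsi_device.h>
#include <linux/pm_runtime.h>

void scsi_autopm_put_target(struct scsi_target *starget)
{
	pm_runtime_put_sync(&starget->dev);	/* drop the reference taken above */
}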
Example #7
/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops) {
		/*
		 * Flush the message queue before executing our SPI memory
		 * operation to prevent preemption of regular SPI transfers.
		 */
		spi_flush_queue(ctlr);

		if (ctlr->auto_runtime_pm) {
			ret = pm_runtime_get_sync(ctlr->dev.parent);
			if (ret < 0) {
				dev_err(&ctlr->dev,
					"Failed to power device: %d\n",
					ret);
				return ret;
			}
		}

		mutex_lock(&ctlr->bus_lock_mutex);
		mutex_lock(&ctlr->io_mutex);
		ret = ctlr->mem_ops->exec_op(mem, op);
		mutex_unlock(&ctlr->io_mutex);
		mutex_unlock(&ctlr->bus_lock_mutex);

		if (ctlr->auto_runtime_pm)
			pm_runtime_put(ctlr->dev.parent);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}
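The fragment above builds a multi-transfer SPI message by appending spi_transfer entries one by one. A self-contained sketch of the same construction pattern, reduced to a command-then-read pair (my_spi_write_then_read and its buffers are illustrative):

#include <linux/spi/spi.h>

static int my_spi_write_then_read(struct spi_device *spi,
				  const u8 *cmd, size_t cmd_len,
				  u8 *rx, size_t rx_len)
{
	struct spi_transfer xfers[2] = { };
	struct spi_message msg;

	spi_message_init(&msg);

	xfers[0].tx_buf = cmd;			/* first transfer: the command */
	xfers[0].len = cmd_len;
	spi_message_add_tail(&xfers[0], &msg);

	xfers[1].rx_buf = rx;			/* second transfer: the response */
	xfers[1].len = rx_len;
	spi_message_add_tail(&xfers[1], &msg);

	return spi_sync(spi, &msg);	/* blocks until the message completes */
}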
Example #8
static int s3c_camif_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct s3c_camif_plat_data *pdata = dev->platform_data;
	struct s3c_camif_drvdata *drvdata;
	struct camif_dev *camif;
	struct resource *mres;
	int ret = 0;

	camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
	if (!camif)
		return -ENOMEM;

	spin_lock_init(&camif->slock);
	mutex_init(&camif->lock);

	camif->dev = dev;

	if (!pdata || !pdata->gpio_get || !pdata->gpio_put) {
		dev_err(dev, "wrong platform data\n");
		return -EINVAL;
	}

	camif->pdata = *pdata;
	drvdata = (void *)platform_get_device_id(pdev)->driver_data;
	camif->variant = drvdata->variant;

	mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	camif->io_base = devm_ioremap_resource(dev, mres);
	if (IS_ERR(camif->io_base))
		return PTR_ERR(camif->io_base);

	ret = camif_request_irqs(pdev, camif);
	if (ret < 0)
		return ret;

	ret = pdata->gpio_get();
	if (ret < 0)
		return ret;

	ret = s3c_camif_create_subdev(camif);
	if (ret < 0)
		goto err_sd;

	ret = camif_clk_get(camif);
	if (ret < 0)
		goto err_clk;

	platform_set_drvdata(pdev, camif);
	clk_set_rate(camif->clock[CLK_CAM],
			camif->pdata.sensor.clock_frequency);

	dev_info(dev, "sensor clock frequency: %lu\n",
		 clk_get_rate(camif->clock[CLK_CAM]));
	/*
	 * Set initial pixel format, resolution and crop rectangle.
	 * Must be done before a sensor subdev is registered as some
	 * settings are overridden with values from the sensor subdev.
	 */
	s3c_camif_set_defaults(camif);

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm;

	ret = camif_media_dev_init(camif);
	if (ret < 0)
		goto err_alloc;

	ret = camif_register_sensor(camif);
	if (ret < 0)
		goto err_sens;

	ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev);
	if (ret < 0)
		goto err_sens;

	ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
	if (ret < 0)
		goto err_sens;

	ret = camif_register_video_nodes(camif);
	if (ret < 0)
		goto err_sens;

	ret = camif_create_media_links(camif);
	if (ret < 0)
		goto err_sens;

	ret = media_device_register(&camif->media_dev);
	if (ret < 0)
		goto err_sens;

	pm_runtime_put(dev);
	return 0;

err_sens:
	v4l2_device_unregister(&camif->v4l2_dev);
	media_device_unregister(&camif->media_dev);
	media_device_cleanup(&camif->media_dev);
	camif_unregister_media_entities(camif);
err_alloc:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
err_pm:
	camif_clk_put(camif);
err_clk:
	s3c_camif_unregister_subdev(camif);
err_sd:
	pdata->gpio_put();
	return ret;
}
Example #9
static int dwc3_of_simple_probe(struct platform_device *pdev)
{
	struct dwc3_of_simple	*simple;
	struct device		*dev = &pdev->dev;
	struct device_node	*np = dev->of_node;

	int			ret;
	int			i;
	bool			shared_resets = false;

	simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL);
	if (!simple)
		return -ENOMEM;

	platform_set_drvdata(pdev, simple);
	simple->dev = dev;

	/*
	 * Some controllers need to toggle the usb3-otg reset before trying to
	 * initialize the PHY, otherwise the PHY times out.
	 */
	if (of_device_is_compatible(np, "rockchip,rk3399-dwc3"))
		simple->need_reset = true;

	if (of_device_is_compatible(np, "amlogic,meson-axg-dwc3") ||
	    of_device_is_compatible(np, "amlogic,meson-gxl-dwc3")) {
		shared_resets = true;
		simple->pulse_resets = true;
	}

	simple->resets = of_reset_control_array_get(np, shared_resets, true);
	if (IS_ERR(simple->resets)) {
		ret = PTR_ERR(simple->resets);
		dev_err(dev, "failed to get device resets, err=%d\n", ret);
		return ret;
	}

	if (simple->pulse_resets) {
		ret = reset_control_reset(simple->resets);
		if (ret)
			goto err_resetc_put;
	} else {
		ret = reset_control_deassert(simple->resets);
		if (ret)
			goto err_resetc_put;
	}

	ret = dwc3_of_simple_clk_init(simple, of_count_phandle_with_args(np,
						"clocks", "#clock-cells"));
	if (ret)
		goto err_resetc_assert;

	ret = of_platform_populate(np, NULL, NULL, dev);
	if (ret) {
		for (i = 0; i < simple->num_clocks; i++) {
			clk_disable_unprepare(simple->clks[i]);
			clk_put(simple->clks[i]);
		}

		goto err_resetc_assert;
	}

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	return 0;

err_resetc_assert:
	if (!simple->pulse_resets)
		reset_control_assert(simple->resets);

err_resetc_put:
	reset_control_put(simple->resets);
	return ret;
}
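The bring-up order at the end of the probe above matters: the probe has already powered and reset the hardware itself, so pm_runtime_set_active() records that state before runtime PM is enabled, and the PM core will not try to resume the device a second time. Reduced to a sketch (my_runtime_pm_bringup is illustrative):

#include <linux/pm_runtime.h>

static void my_runtime_pm_bringup(struct device *dev)
{
	pm_runtime_set_active(dev);	/* device is already powered on */
	pm_runtime_enable(dev);		/* let the PM core manage it from here */
	pm_runtime_get_sync(dev);	/* hold a reference while it is in use */
}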
Example #10
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	struct msm_kms *kms;
	struct msm_gem_address_space *aspace;
	int irq, i, ret;

	/* priv->kms would have been populated by the MDP5 driver */
	kms = priv->kms;
	if (!kms)
		return NULL;

	mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	mdp_kms_init(&mdp5_kms->base, &kms_funcs);

	pdev = mdp5_kms->pdev;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq) {
		/* irq_of_parse_and_map() returns 0 on failure, never < 0 */
		ret = -EINVAL;
		DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	kms->irq = irq;

	config = mdp5_cfg_get_config(mdp5_kms->cfg);

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	pm_runtime_get_sync(&pdev->dev);
	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
		    !config->hw->intf.base[i])
			continue;
		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);

		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
	}
	mdelay(16);

	if (config->platform.iommu) {
		aspace = msm_gem_address_space_create(&pdev->dev,
				config->platform.iommu, "mdp5");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;

		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
				ret);
			goto fail;
		}
	} else {
		DRM_DEV_INFO(&pdev->dev,
			 "no iommu, fallback to phys contig buffers for scanout\n");
		aspace = NULL;
	}

	pm_runtime_put_sync(&pdev->dev);

	ret = modeset_init(mdp5_kms);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 0xffff;
	dev->mode_config.max_height = 0xffff;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
	dev->max_vblank_count = 0xffffffff;
	dev->vblank_disable_immediate = true;

	return kms;
fail:
	if (kms)
		mdp5_kms_destroy(kms);
	return ERR_PTR(ret);
}
Example #11
static int intc_irqpin_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct renesas_intc_irqpin_config *pdata = dev->platform_data;
	const struct of_device_id *of_id;
	struct intc_irqpin_priv *p;
	struct intc_irqpin_iomem *i;
	struct resource *io[INTC_IRQPIN_REG_NR];
	struct resource *irq;
	struct irq_chip *irq_chip;
	void (*enable_fn)(struct irq_data *d);
	void (*disable_fn)(struct irq_data *d);
	const char *name = dev_name(dev);
	int ref_irq;
	int ret;
	int k;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	/* deal with driver instance configuration */
	if (pdata) {
		memcpy(&p->config, pdata, sizeof(*pdata));
	} else {
		of_property_read_u32(dev->of_node, "sense-bitfield-width",
				     &p->config.sense_bitfield_width);
		p->config.control_parent = of_property_read_bool(dev->of_node,
								 "control-parent");
	}
	if (!p->config.sense_bitfield_width)
		p->config.sense_bitfield_width = 4; /* default to 4 bits */

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	p->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(p->clk)) {
		dev_warn(dev, "unable to get clock\n");
		p->clk = NULL;
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* get hold of register banks */
	memset(io, 0, sizeof(io));
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
		if (!io[k] && k < INTC_IRQPIN_REG_NR_MANDATORY) {
			dev_err(dev, "not enough IOMEM resources\n");
			ret = -EINVAL;
			goto err0;
		}
	}

	/* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
	for (k = 0; k < INTC_IRQPIN_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;

		p->irq[k].p = p;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err0;
	}

	/* ioremap IOMEM and setup read/write callbacks */
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		i = &p->iomem[k];

		/* handle optional registers */
		if (!io[k])
			continue;

		switch (resource_size(io[k])) {
		case 1:
			i->width = 8;
			i->read = intc_irqpin_read8;
			i->write = intc_irqpin_write8;
			break;
		case 4:
			i->width = 32;
			i->read = intc_irqpin_read32;
			i->write = intc_irqpin_write32;
			break;
		default:
			dev_err(dev, "IOMEM size mismatch\n");
			ret = -EINVAL;
			goto err0;
		}

		i->iomem = devm_ioremap_nocache(dev, io[k]->start,
						resource_size(io[k]));
		if (!i->iomem) {
			dev_err(dev, "failed to remap IOMEM\n");
			ret = -ENXIO;
			goto err0;
		}
	}

	/* configure "individual IRQ mode" where needed */
	of_id = of_match_device(intc_irqpin_dt_ids, dev);
	if (of_id && of_id->data) {
		const struct intc_irqpin_irlm_config *irlm_config = of_id->data;

		if (io[INTC_IRQPIN_REG_IRLM])
			intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
						      irlm_config->irlm_bit,
						      1, 1);
		else
			dev_warn(dev, "unable to select IRLM mode\n");
	}

	/* mask all interrupts using priority */
	for (k = 0; k < p->number_of_irqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 1);

	/* clear all pending interrupts */
	intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);

	/* scan for shared interrupt lines */
	ref_irq = p->irq[0].requested_irq;
	p->shared_irqs = true;
	for (k = 1; k < p->number_of_irqs; k++) {
		if (ref_irq != p->irq[k].requested_irq) {
			p->shared_irqs = false;
			break;
		}
	}

	/* use more severe masking method if requested */
	if (p->config.control_parent) {
		enable_fn = intc_irqpin_irq_enable_force;
		disable_fn = intc_irqpin_irq_disable_force;
	} else if (!p->shared_irqs) {
		enable_fn = intc_irqpin_irq_enable;
		disable_fn = intc_irqpin_irq_disable;
	} else {
		enable_fn = intc_irqpin_shared_irq_enable;
		disable_fn = intc_irqpin_shared_irq_disable;
	}

	irq_chip = &p->irq_chip;
	irq_chip->name = name;
	irq_chip->irq_mask = disable_fn;
	irq_chip->irq_unmask = enable_fn;
	irq_chip->irq_set_type = intc_irqpin_irq_set_type;
	irq_chip->irq_set_wake = intc_irqpin_irq_set_wake;
	irq_chip->flags	= IRQCHIP_MASK_ON_SUSPEND;

	p->irq_domain = irq_domain_add_simple(dev->of_node,
					      p->number_of_irqs,
					      p->config.irq_base,
					      &intc_irqpin_irq_domain_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(dev, "cannot initialize irq domain\n");
		goto err0;
	}

	if (p->shared_irqs) {
		/* request one shared interrupt */
		if (devm_request_irq(dev, p->irq[0].requested_irq,
				intc_irqpin_shared_irq_handler,
				IRQF_SHARED, name, p)) {
			dev_err(dev, "failed to request low IRQ\n");
			ret = -ENOENT;
			goto err1;
		}
	} else {
		/* request interrupts one by one */
		for (k = 0; k < p->number_of_irqs; k++) {
			if (devm_request_irq(dev, p->irq[k].requested_irq,
					     intc_irqpin_irq_handler, 0, name,
					     &p->irq[k])) {
				dev_err(dev, "failed to request low IRQ\n");
				ret = -ENOENT;
				goto err1;
			}
		}
	}

	/* unmask all interrupts on prio level */
	for (k = 0; k < p->number_of_irqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 0);

	dev_info(dev, "driving %d irqs\n", p->number_of_irqs);

	/* warn in case of mismatch if irq base is specified */
	if (p->config.irq_base) {
		if (p->config.irq_base != p->irq[0].domain_irq)
			dev_warn(dev, "irq base mismatch (%d/%d)\n",
				 p->config.irq_base, p->irq[0].domain_irq);
	}

	return 0;

err1:
	irq_domain_remove(p->irq_domain);
err0:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	return ret;
}
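The probe above picks between one IRQF_SHARED handler for the whole line and one handler per interrupt. With IRQF_SHARED the handler runs for every device on that line, so it must check whether its own hardware actually raised the interrupt and return IRQ_NONE otherwise; a sketch of that contract (all my_* names are illustrative):

#include <linux/interrupt.h>
#include <linux/io.h>

struct my_priv { void __iomem *base; };

static bool my_pending(struct my_priv *p);	/* illustrative HW status check */
static void my_handle(struct my_priv *p);	/* illustrative ack + processing */

static irqreturn_t my_shared_handler(int irq, void *dev_id)
{
	struct my_priv *p = dev_id;

	if (!my_pending(p))	/* not ours: let the other handlers run */
		return IRQ_NONE;

	my_handle(p);
	return IRQ_HANDLED;
}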
Example #12
int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
		     struct analogix_dp_plat_data *plat_data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct analogix_dp_device *dp;
	struct resource *res;
	unsigned int irq_flags;
	int ret;

	if (!plat_data) {
		dev_err(dev, "Invalided input plat_data\n");
		return -EINVAL;
	}

	dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;

	dev_set_drvdata(dev, dp);

	dp->dev = &pdev->dev;
	dp->dpms_mode = DRM_MODE_DPMS_OFF;

	mutex_init(&dp->panel_lock);
	dp->panel_is_modeset = false;

	/*
	 * The platform dp driver needs to container_of() the plat_data to
	 * get at its driver private data, so we must store the pointer to
	 * plat_data itself, not a copy of its contents.
	 */
	dp->plat_data = plat_data;

	ret = analogix_dp_dt_parse_pdata(dp);
	if (ret)
		return ret;

	dp->phy = devm_phy_get(dp->dev, "dp");
	if (IS_ERR(dp->phy)) {
		dev_err(dp->dev, "no DP phy configured\n");
		ret = PTR_ERR(dp->phy);
		if (ret) {
			/*
			 * phy itself is not enabled, so we can move forward
			 * assigning NULL to phy pointer.
			 */
			if (ret == -ENOSYS || ret == -ENODEV)
				dp->phy = NULL;
			else
				return ret;
		}
	}

	dp->clock = devm_clk_get(&pdev->dev, "dp");
	if (IS_ERR(dp->clock)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(dp->clock);
	}

	clk_prepare_enable(dp->clock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dp->reg_base))
		return PTR_ERR(dp->reg_base);

	dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");

	dp->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpios", 0);
	if (!gpio_is_valid(dp->hpd_gpio))
		dp->hpd_gpio = of_get_named_gpio(dev->of_node,
						 "samsung,hpd-gpio", 0);

	if (gpio_is_valid(dp->hpd_gpio)) {
		/*
		 * Set up the hotplug GPIO from the device tree as an interrupt.
		 * Simply specifying a different interrupt in the device tree
		 * doesn't work since we handle hotplug rather differently when
		 * using a GPIO.  We also need the actual GPIO specifier so
		 * that we can get the current state of the GPIO.
		 */
		ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
					    "hpd_gpio");
		if (ret) {
			dev_err(&pdev->dev, "failed to get hpd gpio\n");
			return ret;
		}
		dp->irq = gpio_to_irq(dp->hpd_gpio);
		irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
	} else {
		dp->hpd_gpio = -ENODEV;
		dp->irq = platform_get_irq(pdev, 0);
		irq_flags = 0;
	}

	if (dp->irq == -ENXIO) {
		dev_err(&pdev->dev, "failed to get irq\n");
		return -ENODEV;
	}

	pm_runtime_enable(dev);

	pm_runtime_get_sync(dev);
	phy_power_on(dp->phy);

	analogix_dp_init_dp(dp);

	ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
					analogix_dp_hardirq,
					analogix_dp_irq_thread,
					irq_flags, "analogix-dp", dp);
	if (ret) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto err_disable_pm_runtime;
	}
	disable_irq(dp->irq);

	dp->drm_dev = drm_dev;
	dp->encoder = dp->plat_data->encoder;

	dp->aux.name = "DP-AUX";
	dp->aux.transfer = analogix_dpaux_transfer;
	dp->aux.dev = &pdev->dev;

	ret = drm_dp_aux_register(&dp->aux);
	if (ret)
		goto err_disable_pm_runtime;

	ret = analogix_dp_create_bridge(drm_dev, dp);
	if (ret) {
		DRM_ERROR("failed to create bridge (%d)\n", ret);
		drm_encoder_cleanup(dp->encoder);
		goto err_disable_pm_runtime;
	}

	phy_power_off(dp->phy);
	pm_runtime_put(dev);

	return 0;

err_disable_pm_runtime:

	phy_power_off(dp->phy);
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}
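Note how the bind above requests its IRQ early and immediately calls disable_irq(), only re-enabling it once the bridge and AUX channel exist. A sketch of that small pattern (my_setup_irq is illustrative; this variant uses a threaded-only handler):

#include <linux/device.h>
#include <linux/interrupt.h>

static int my_setup_irq(struct device *dev, int irq,
			irq_handler_t thread_fn, void *ctx)
{
	int ret;

	/* NULL hardirq handler + IRQF_ONESHOT: thread_fn does all the work */
	ret = devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					IRQF_ONESHOT, "my-dev", ctx);
	if (ret)
		return ret;

	disable_irq(irq);	/* keep it off until the device is ready */
	return 0;
}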
Example #13
static int usbhs_enable(struct device *dev)
{
	struct usbhs_hcd_omap		*omap = dev_get_drvdata(dev);
	struct usbhs_omap_platform_data	*pdata = &omap->platdata;
	unsigned long			flags = 0;
	int				ret = 0;
	unsigned			reg;

	dev_dbg(dev, "starting TI HSUSB Controller\n");
	if (!pdata) {
		dev_dbg(dev, "missing platform_data\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&omap->lock, flags);
	if (omap->count > 0)
		goto end_count;

	pm_runtime_get_sync(dev);

	if (pdata->ehci_data->phy_reset) {
		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
			gpio_request(pdata->ehci_data->reset_gpio_port[0],
						"USB1 PHY reset");
			gpio_direction_output
				(pdata->ehci_data->reset_gpio_port[0], 0);
		}

		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) {
			gpio_request(pdata->ehci_data->reset_gpio_port[1],
						"USB2 PHY reset");
			gpio_direction_output
				(pdata->ehci_data->reset_gpio_port[1], 0);
		}

		/* Hold the PHY in RESET for enough time till DIR is high */
		udelay(10);
	}

	omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
	dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);

	reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
	/* setup ULPI bypass and burst configurations */
	reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
			| OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
			| OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
	reg |= OMAP4_UHH_HOSTCONFIG_APP_START_CLK;
	reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;

	if (is_omap_usbhs_rev1(omap)) {
		if (pdata->port_mode[0] == OMAP_USBHS_PORT_MODE_UNUSED)
			reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
		if (pdata->port_mode[1] == OMAP_USBHS_PORT_MODE_UNUSED)
			reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
		if (pdata->port_mode[2] == OMAP_USBHS_PORT_MODE_UNUSED)
			reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;

		/* Bypass the TLL module for PHY mode operation */
		if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
			dev_dbg(dev, "OMAP3 ES version <= ES2.1\n");
			if (is_ehci_phy_mode(pdata->port_mode[0]) ||
				is_ehci_phy_mode(pdata->port_mode[1]) ||
					is_ehci_phy_mode(pdata->port_mode[2]))
				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
			else
				reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
		} else {
			dev_dbg(dev, "OMAP3 ES version > ES2.1\n");
			if (is_ehci_phy_mode(pdata->port_mode[0]))
				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
			else
				reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
			if (is_ehci_phy_mode(pdata->port_mode[1]))
				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
			else
				reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
			if (is_ehci_phy_mode(pdata->port_mode[2]))
				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
			else
				reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
		}
	} else if (is_omap_usbhs_rev2(omap)) {
		/* Clear port mode fields for PHY mode*/
		reg &= ~OMAP4_P1_MODE_CLEAR;
		reg &= ~OMAP4_P2_MODE_CLEAR;

		if (is_ehci_phy_mode(pdata->port_mode[0])) {
			ret = clk_set_parent(omap->utmi_p1_fck,
						omap->xclk60mhsp1_ck);
			if (ret != 0) {
				dev_err(dev, "xclk60mhsp1_ck set parent"
				"failed error:%d\n", ret);
				goto err_tll;
			}
		} else if (is_ehci_tll_mode(pdata->port_mode[0])) {
			ret = clk_set_parent(omap->utmi_p1_fck,
						omap->init_60m_fclk);
			if (ret != 0) {
				dev_err(dev, "init_60m_fclk set parent"
				"failed error:%d\n", ret);
				goto err_tll;
			}
			clk_enable(omap->usbhost_p1_fck);
			clk_enable(omap->usbtll_p1_fck);
		}

		if (is_ehci_phy_mode(pdata->port_mode[1])) {
			ret = clk_set_parent(omap->utmi_p2_fck,
						omap->xclk60mhsp2_ck);
			if (ret != 0) {
				dev_err(dev, "xclk60mhsp1_ck set parent"
					"failed error:%d\n", ret);
				goto err_tll;
			}
		} else if (is_ehci_tll_mode(pdata->port_mode[1])) {
			ret = clk_set_parent(omap->utmi_p2_fck,
						omap->init_60m_fclk);
			if (ret != 0) {
				dev_err(dev, "init_60m_fclk set parent"
				"failed error:%d\n", ret);
				goto err_tll;
			}
			clk_enable(omap->usbhost_p2_fck);
			clk_enable(omap->usbtll_p2_fck);
		}

		clk_enable(omap->utmi_p1_fck);
		clk_enable(omap->utmi_p2_fck);

		if (is_ehci_tll_mode(pdata->port_mode[0]) ||
			(is_ohci_port(pdata->port_mode[0])))
			reg |= OMAP4_P1_MODE_TLL;
		else if (is_ehci_hsic_mode(pdata->port_mode[0]))
			reg |= OMAP4_P1_MODE_HSIC;

		if (is_ehci_tll_mode(pdata->port_mode[1]) ||
			(is_ohci_port(pdata->port_mode[1])))
			reg |= OMAP4_P2_MODE_TLL;
		else if (is_ehci_hsic_mode(pdata->port_mode[1]))
			reg |= OMAP4_P2_MODE_HSIC;
	}

	usbhs_write(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
	dev_dbg(dev, "UHH setup done, uhh_hostconfig=%x\n", reg);

	if (is_ehci_tll_mode(pdata->port_mode[0]) ||
		is_ehci_tll_mode(pdata->port_mode[1]) ||
		is_ehci_tll_mode(pdata->port_mode[2]) ||
		(is_ohci_port(pdata->port_mode[0])) ||
		(is_ohci_port(pdata->port_mode[1])) ||
		(is_ohci_port(pdata->port_mode[2]))) {

		/* Enable UTMI mode for required TLL channels */
		if (is_omap_usbhs_rev2(omap))
			usbhs_omap_tll_init(dev, OMAP_REV2_TLL_CHANNEL_COUNT);
		else
			usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT);
	}

	if (pdata->ehci_data->phy_reset) {
		/* Hold the PHY in RESET for enough time till
		 * PHY is settled and ready
		 */
		udelay(10);

		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
			gpio_set_value
				(pdata->ehci_data->reset_gpio_port[0], 1);

		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
			gpio_set_value
				(pdata->ehci_data->reset_gpio_port[1], 1);
	}

end_count:
	omap->count++;
	spin_unlock_irqrestore(&omap->lock, flags);
	return 0;

err_tll:
	pm_runtime_put_sync(dev);
	spin_unlock_irqrestore(&omap->lock, flags);
	if (pdata->ehci_data->phy_reset) {
		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
			gpio_free(pdata->ehci_data->reset_gpio_port[0]);

		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
			gpio_free(pdata->ehci_data->reset_gpio_port[1]);
	}
	return ret;
}
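usbhs_enable() above is reference counted: only the first caller actually powers and configures the controller, later callers just bump omap->count under the spinlock. The core of that shape, reduced to a sketch (my_* names are illustrative; power_on must not sleep, since it runs under the lock):

#include <linux/spinlock.h>

struct my_ctrl {
	spinlock_t lock;
	int count;
};

static void my_ctrl_enable(struct my_ctrl *c,
			   void (*power_on)(struct my_ctrl *))
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (c->count++ == 0)
		power_on(c);	/* only the first user touches the HW */
	spin_unlock_irqrestore(&c->lock, flags);
}

Example #14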
static ssize_t baseline_debugfs_read(struct file *filp, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
	struct cyttsp5_device_access_data *dad = data->dad;
	struct device *dev = dad->dev;
	int status = STATUS_FAIL;
	int length = 0;
	int rc;

	if (*ppos)
		goto exit;

	mutex_lock(&dad->sysfs_lock);

	pm_runtime_get_sync(dev);

	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
	if (rc < 0) {
		dev_err(dev, "%s: Error on request exclusive r=%d\n",
				__func__, rc);
		goto put_pm_runtime;
	}

	rc = cyttsp5_suspend_scan_cmd_(dev);
	if (rc < 0) {
		dev_err(dev, "%s: Error on suspend scan r=%d\n",
				__func__, rc);
		goto release_exclusive;
	}

	rc = _cyttsp5_initialize_baselines_cmd(dev, dad->baseline_sensing_mode,
			&dad->ic_buf[0]);
	if (rc < 0) {
		dev_err(dev, "%s: Error on initialize baselines r=%d\n",
				__func__, rc);
		goto resume_scan;
	}

	length = 1;

	status = STATUS_SUCCESS;

resume_scan:
	cyttsp5_resume_scan_cmd_(dev);

release_exclusive:
	cmd->release_exclusive(dev);

put_pm_runtime:
	pm_runtime_put(dev);

	if (status == STATUS_FAIL)
		length = 0;

	data->pr_buf_len = prepare_print_buffer(status, dad->ic_buf, length,
			data->pr_buf, sizeof(data->pr_buf));

	mutex_unlock(&dad->sysfs_lock);

exit:
	return simple_read_from_buffer(buf, count, ppos, data->pr_buf,
			data->pr_buf_len);
}
Example #15
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	bool print_wakeup = false;
	int sub_irq;
	u32 reg;

	if (data->wakeup_print_enable && (irq == get_wakeup_reason_irq()))
		print_wakeup = true;

	mutex_lock(&data->shutdown_lock);
	if (data->shutdown) {
		dev_err(map->dev, "IRQ %d thread calls after shutdown\n", irq);
		goto exit;
	}

	if (chip->pre_irq && chip->pre_post_irq_data)
		chip->pre_irq(chip->pre_post_irq_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			goto exit;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_rw && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			if (chip->runtime_pm)
				pm_runtime_put(map->dev);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && chip->ack_base) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			sub_irq = irq_find_mapping(data->domain, i);
			if (print_wakeup)
				print_wakeup_irq(map->dev, sub_irq);

			handle_nested_irq(sub_irq);
			handled = true;
		}
	}

	data->wakeup_print_enable = false;
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

exit:
	if (chip->post_irq && chip->pre_post_irq_data)
		chip->post_irq(chip->pre_post_irq_data);

	mutex_unlock(&data->shutdown_lock);
	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
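At its core, regmap_irq_thread() above is a demultiplexer: it reads the status registers and forwards each set status bit as a nested virtual interrupt looked up in the irq_domain. A skeletal sketch of that loop (my_read_status is an illustrative stand-in for the register read):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static unsigned long my_read_status(void);	/* illustrative register read */

static irqreturn_t my_demux_thread(int irq, void *d)
{
	struct irq_domain *domain = d;
	unsigned long status = my_read_status();
	bool handled = false;
	int bit;

	for_each_set_bit(bit, &status, BITS_PER_LONG) {
		handle_nested_irq(irq_find_mapping(domain, bit));
		handled = true;
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}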
	/* put the controller in normal mode */
	func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
	func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
	func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
	ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
}

#define MSM_CHG_DCD_POLL_TIME		(100 * HZ/1000) /* 100 msec */
#define MSM_CHG_DCD_MAX_RETRIES		6 /* Tdcd_tmout = 6 * 100 msec */
#define MSM_CHG_PRIMARY_DET_TIME	(40 * HZ/1000) /* TVDPSRC_ON */
#define MSM_CHG_SECONDARY_DET_TIME	(40 * HZ/1000) /* TVDMSRC_ON */
static void msm_chg_detect_work(struct work_struct *w)
{
	struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
	struct usb_phy *phy = &motg->phy;
	bool is_dcd, tmout, vout;
	unsigned long delay;

	dev_dbg(phy->dev, "chg detection work\n");
	switch (motg->chg_state) {
	case USB_CHG_STATE_UNDEFINED:
		pm_runtime_get_sync(phy->dev);
		msm_chg_block_on(motg);
		msm_chg_enable_dcd(motg);
		motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
		motg->dcd_retries = 0;
		delay = MSM_CHG_DCD_POLL_TIME;
		break;
	case USB_CHG_STATE_WAIT_FOR_DCD:
		is_dcd = msm_chg_check_dcd(motg);
		tmout = ++motg->dcd_retries == MSM_CHG_DCD_MAX_RETRIES;
		if (is_dcd || tmout) {
			msm_chg_disable_dcd(motg);
			msm_chg_enable_primary_det(motg);
			delay = MSM_CHG_PRIMARY_DET_TIME;
			motg->chg_state = USB_CHG_STATE_DCD_DONE;
		} else {
			delay = MSM_CHG_DCD_POLL_TIME;
		}
		break;
	case USB_CHG_STATE_DCD_DONE:
		vout = msm_chg_check_primary_det(motg);
		if (vout) {
			msm_chg_enable_secondary_det(motg);
			delay = MSM_CHG_SECONDARY_DET_TIME;
			motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
		} else {
			motg->chg_type = USB_SDP_CHARGER;
			motg->chg_state = USB_CHG_STATE_DETECTED;
			delay = 0;
		}
		break;
	case USB_CHG_STATE_PRIMARY_DONE:
		vout = msm_chg_check_secondary_det(motg);
		if (vout)
			motg->chg_type = USB_DCP_CHARGER;
		else
			motg->chg_type = USB_CDP_CHARGER;
		motg->chg_state = USB_CHG_STATE_SECONDARY_DONE;
		/* fall through */
	case USB_CHG_STATE_SECONDARY_DONE:
		motg->chg_state = USB_CHG_STATE_DETECTED;
		/* fall through */
	case USB_CHG_STATE_DETECTED:
		msm_chg_block_off(motg);
		dev_dbg(phy->dev, "charger = %d\n", motg->chg_type);
		schedule_work(&motg->sm_work);
		return;
	default:
		return;
	}

	schedule_delayed_work(&motg->chg_work, delay);
}
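
Note that pm_runtime_get_sync() is taken once, in USB_CHG_STATE_UNDEFINED, and the work then keeps rescheduling itself; the matching pm_runtime_put() presumably happens from the sm_work scheduled on completion. The self-rescheduling poll loop reduces to the sketch below; struct poller, poll_done() and the constants are illustrative, not part of the driver.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define POLL_PERIOD	msecs_to_jiffies(100)	/* cf. MSM_CHG_DCD_POLL_TIME */
#define POLL_RETRIES	6			/* cf. MSM_CHG_DCD_MAX_RETRIES */

struct poller {
	struct delayed_work work;
	int retries;
};

extern bool poll_done(struct poller *p);	/* hypothetical end condition */

static void poll_fn(struct work_struct *w)
{
	struct poller *p = container_of(to_delayed_work(w), struct poller, work);

	/* stop when the condition holds or the retry budget is spent */
	if (poll_done(p) || ++p->retries == POLL_RETRIES)
		return;

	schedule_delayed_work(&p->work, POLL_PERIOD);
}
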
Example #17
0
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert)
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		if (d->wake_buf) {
			reg = d->chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
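
The sync runs with the device held active only for the duration of the register writes, and it leans on the regmap cache so unchanged masks cost no bus traffic. The invert handling that appears three times above can be factored into one helper; a sketch, assuming the caller passes the precomputed register address:

#include <linux/regmap.h>

/*
 * Sketch: write an IRQ mask through regmap, honouring an invert flag.
 * With invert set, an enabled bit must reach the hardware as a zero.
 */
static int sync_irq_bits(struct regmap *map, unsigned int reg,
			 unsigned int def, unsigned int mask, bool invert)
{
	return regmap_update_bits(map, reg, def, invert ? ~mask : mask);
}
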
Example #18
0
static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct msm_otg *motg = s->private;
	char buf[16];
	struct usb_otg *otg = motg->phy.otg;
	int status = count;
	enum usb_dr_mode req_mode;

	memset(buf, 0x00, sizeof(buf));

	if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
		status = -EFAULT;
		goto out;
	}

	if (!strncmp(buf, "host", 4)) {
		req_mode = USB_DR_MODE_HOST;
	} else if (!strncmp(buf, "peripheral", 10)) {
		req_mode = USB_DR_MODE_PERIPHERAL;
	} else if (!strncmp(buf, "none", 4)) {
		req_mode = USB_DR_MODE_UNKNOWN;
	} else {
		status = -EINVAL;
		goto out;
	}

	switch (req_mode) {
	case USB_DR_MODE_UNKNOWN:
		switch (otg->state) {
		case OTG_STATE_A_HOST:
		case OTG_STATE_B_PERIPHERAL:
			set_bit(ID, &motg->inputs);
			clear_bit(B_SESS_VLD, &motg->inputs);
			break;
		default:
			goto out;
		}
		break;
	case USB_DR_MODE_PERIPHERAL:
		switch (otg->state) {
		case OTG_STATE_B_IDLE:
		case OTG_STATE_A_HOST:
			set_bit(ID, &motg->inputs);
			set_bit(B_SESS_VLD, &motg->inputs);
			break;
		default:
			goto out;
		}
		break;
	case USB_DR_MODE_HOST:
		switch (otg->state) {
		case OTG_STATE_B_IDLE:
		case OTG_STATE_B_PERIPHERAL:
			clear_bit(ID, &motg->inputs);
			break;
		default:
			goto out;
		}
		break;
	default:
		goto out;
	}

	pm_runtime_get_sync(otg->phy->dev);
	schedule_work(&motg->sm_work);
out:
	return status;
}
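
The write handler shows the usual debugfs input pattern: copy at most sizeof(buf) - 1 bytes into a zeroed stack buffer, dispatch on a keyword, and only then take the runtime PM reference and kick the state machine. A stripped-down sketch of the parsing half; mode_store and its keywords are illustrative only.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t mode_store(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	char buf[16] = "";	/* zero-filled, so the copy stays NUL-terminated */
	bool host;

	if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;

	if (!strncmp(buf, "host", 4))
		host = true;
	else if (!strncmp(buf, "peripheral", 10))
		host = false;
	else
		return -EINVAL;

	pr_debug("requested %s mode\n", host ? "host" : "peripheral");
	return count;	/* claim the whole write */
}
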
Example #19
0
static int
nouveau_debugfs_pstate_get(struct seq_file *m, void *data)
{
	struct drm_device *drm = m->private;
	struct nouveau_debugfs *debugfs = nouveau_debugfs(drm);
	struct nvif_object *ctrl = &debugfs->ctrl;
	struct nvif_control_pstate_info_v0 info = {};
	int ret, i;

	if (!debugfs)
		return -ENODEV;

	ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_INFO, &info, sizeof(info));
	if (ret)
		return ret;

	for (i = 0; i < info.count + 1; i++) {
		const s32 state = i < info.count ? i :
			NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT;
		struct nvif_control_pstate_attr_v0 attr = {
			.state = state,
			.index = 0,
		};

		ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_ATTR,
				&attr, sizeof(attr));
		if (ret)
			return ret;

		if (i < info.count)
			seq_printf(m, "%02x:", attr.state);
		else
			seq_printf(m, "%s:", info.pwrsrc == 0 ? "DC" :
					     info.pwrsrc == 1 ? "AC" : "--");

		attr.index = 0;
		do {
			attr.state = state;
			ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_ATTR,
					&attr, sizeof(attr));
			if (ret)
				return ret;

			seq_printf(m, " %s %d", attr.name, attr.min);
			if (attr.min != attr.max)
				seq_printf(m, "-%d", attr.max);
			seq_printf(m, " %s", attr.unit);
		} while (attr.index);

		if (state >= 0) {
			if (info.ustate_ac == state)
				seq_printf(m, " AC");
			if (info.ustate_dc == state)
				seq_printf(m, " DC");
			if (info.pstate == state)
				seq_printf(m, " *");
		} else {
			if (info.ustate_ac < -1)
				seq_printf(m, " AC");
			if (info.ustate_dc < -1)
				seq_printf(m, " DC");
		}

		seq_printf(m, "\n");
	}

	return 0;
}

static ssize_t
nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
			   size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *drm = m->private;
	struct nouveau_debugfs *debugfs = nouveau_debugfs(drm);
	struct nvif_object *ctrl = &debugfs->ctrl;
	struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL };
	char buf[32] = {}, *tmp, *cur = buf;
	long value, ret;

	if (!debugfs)
		return -ENODEV;

	if (len >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, len))
		return -EFAULT;

	if ((tmp = strchr(buf, '\n')))
		*tmp = '\0';

	if (!strncasecmp(cur, "dc:", 3)) {
		args.pwrsrc = 0;
		cur += 3;
	} else
	if (!strncasecmp(cur, "ac:", 3)) {
		args.pwrsrc = 1;
		cur += 3;
	}

	if (!strcasecmp(cur, "none"))
		args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN;
	else
	if (!strcasecmp(cur, "auto"))
		args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON;
	else {
		ret = kstrtol(cur, 16, &value);
		if (ret)
			return ret;
		args.ustate = value;
	}

	ret = pm_runtime_get_sync(drm->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;
	ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
	pm_runtime_put_autosuspend(drm->dev);
	if (ret < 0)
		return ret;

	return len;
}
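
Two details of the write path above are worth copying: pm_runtime_get_sync() failures are tolerated when they are -EACCES (runtime PM disabled on the device), and the reference is dropped with pm_runtime_put_autosuspend() so the GPU lingers powered for a while after the access. A sketch of that bracket; touch_hw() is a hypothetical stand-in for the real method call.

#include <linux/pm_runtime.h>

extern int touch_hw(struct device *dev);	/* hypothetical register access */

static int powered_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	ret = touch_hw(dev);

	pm_runtime_mark_last_busy(dev);	/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);
	return ret;
}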

static int
nouveau_debugfs_pstate_open(struct inode *inode, struct file *file)
{
	return single_open(file, nouveau_debugfs_pstate_get, inode->i_private);
}

static const struct file_operations nouveau_pstate_fops = {
	.owner = THIS_MODULE,
	.open = nouveau_debugfs_pstate_open,
	.read = seq_read,
	.write = nouveau_debugfs_pstate_set,
};

static struct drm_info_list nouveau_debugfs_list[] = {
	{ "vbios.rom",  nouveau_debugfs_vbios_image, 0, NULL },
	{ "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)

static const struct nouveau_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} nouveau_debugfs_files[] = {
	{"pstate", &nouveau_pstate_fops},
};

int
nouveau_drm_debugfs_init(struct drm_minor *minor)
{
	struct nouveau_drm *drm = nouveau_drm(minor->dev);
	struct dentry *dentry;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
		dentry = debugfs_create_file(nouveau_debugfs_files[i].name,
					     S_IRUGO | S_IWUSR,
					     minor->debugfs_root, minor->dev,
					     nouveau_debugfs_files[i].fops);
		if (!dentry)
			return -ENOMEM;
	}

	ret = drm_debugfs_create_files(nouveau_debugfs_list,
				       NOUVEAU_DEBUGFS_ENTRIES,
				       minor->debugfs_root, minor);
	if (ret)
		return ret;

	/* Set the size of the vbios since we know it, and it's confusing to
	 * userspace if it wants to seek() but the file has a length of 0
	 */
	dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
	if (!dentry)
		return 0;

	d_inode(dentry)->i_size = drm->vbios.length;
	dput(dentry);

	return 0;
}

int
nouveau_debugfs_init(struct nouveau_drm *drm)
{
	int ret;

	drm->debugfs = kzalloc(sizeof(*drm->debugfs), GFP_KERNEL);
	if (!drm->debugfs)
		return -ENOMEM;

	ret = nvif_object_init(&drm->client.device.object, 0,
			       NVIF_CLASS_CONTROL, NULL, 0,
			       &drm->debugfs->ctrl);
	if (ret)
		return ret;

	return 0;
}

void
nouveau_debugfs_fini(struct nouveau_drm *drm)
{
	if (drm->debugfs && drm->debugfs->ctrl.priv)
		nvif_object_fini(&drm->debugfs->ctrl);

	kfree(drm->debugfs);
	drm->debugfs = NULL;
}
Example #20
0
int fimc_is_companion_open(struct fimc_is_device_companion *device)
{
	int ret = 0;
	struct fimc_is_core *core;
	/* Workaround for Host to use ISP-SPI. Will be removed later.*/
	struct fimc_is_spi_gpio *spi_gpio;
	static char companion_fw_name[100];
	static char master_setf_name[100];
	static char mode_setf_name[100];
	static char fw_name[100];
	static char setf_name[100];

	BUG_ON(!device);

	core = (struct fimc_is_core *)dev_get_drvdata(fimc_is_dev);
	spi_gpio = &core->spi_gpio;

	if (test_bit(FIMC_IS_COMPANION_OPEN, &device->state)) {
		err("already open");
		ret = -EMFILE;
		goto p_err;
	}

	device->companion_status = FIMC_IS_COMPANION_OPENNING;
#if defined(CONFIG_PM_RUNTIME)
	pm_runtime_get_sync(&device->pdev->dev);
#endif
	ret = fimc_is_sec_fw_sel(core, &device->pdev->dev, fw_name, setf_name, 0);
	if (ret < 0) {
		err("failed to select firmware (%d)", ret);
		goto p_err;
	}

	ret = fimc_is_sec_concord_fw_sel(core, &device->pdev->dev, companion_fw_name, master_setf_name, mode_setf_name, 0);
	if (ret < 0) {
		err("failed to select companion firmware (%d)", ret);
		goto p_err;
	}

	/* TODO: loading firmware */
	fimc_is_s_int_comb_isp(core, false, INTMR2_INTMCIS22);

	// Workaround for Host to use ISP-SPI. Will be removed later.
	/* set pin output for Host to use SPI*/
	pin_config_set(FIMC_IS_SPI_PINNAME, spi_gpio->spi_ssn,
		PINCFG_PACK(PINCFG_TYPE_FUNC, FUNC_OUTPUT));

	fimc_is_set_spi_config(spi_gpio, FIMC_IS_SPI_FUNC, false);

	if (fimc_is_comp_is_valid(core) == 0) {
		ret = fimc_is_comp_loadfirm(core);
		if (ret) {
			err("fimc_is_comp_loadfirm() fail");
			goto p_err;
		}
		ret = fimc_is_comp_loadcal(core);
		if (ret) {
			err("fimc_is_comp_loadcal() fail");
		}
		if (core->fan53555_client != NULL)
			fimc_is_power_binning(core);
		ret = fimc_is_comp_loadsetf(core);
		if (ret) {
			err("fimc_is_comp_loadsetf() fail");
			goto p_err;
		}
	}

	// Workaround for Host to use ISP-SPI. Will be removed later.
	/* Set SPI pins to low before changing pin function */
	pin_config_set(FIMC_IS_SPI_PINNAME, spi_gpio->spi_sclk,
		PINCFG_PACK(PINCFG_TYPE_DAT, 0));
	pin_config_set(FIMC_IS_SPI_PINNAME, spi_gpio->spi_ssn,
		PINCFG_PACK(PINCFG_TYPE_DAT, 0));
	pin_config_set(FIMC_IS_SPI_PINNAME, spi_gpio->spi_miso,
		PINCFG_PACK(PINCFG_TYPE_DAT, 0));
	pin_config_set(FIMC_IS_SPI_PINNAME, spi_gpio->spi_mois,
		PINCFG_PACK(PINCFG_TYPE_DAT, 0));

	/* Set pin function for A5 to use SPI */
	pin_config_set(FIMC_IS_SPI_PINNAME, spi_gpio->spi_ssn,
		PINCFG_PACK(PINCFG_TYPE_FUNC, 2));

	set_bit(FIMC_IS_COMPANION_OPEN, &device->state);
	device->companion_status = FIMC_IS_COMPANION_OPENDONE;
	fimc_is_companion_wakeup(device);

p_err:
	info("[COMP:D] %s(%d)status(%d)\n", __func__, ret,device->companion_status);
	return ret;
}
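
The #if defined(CONFIG_PM_RUNTIME) guard around pm_runtime_get_sync() above is strictly redundant on mainline kernels, where the call compiles to a stub reporting success when runtime PM is configured out. A sketch of the open-time power-up with the error handling made explicit; companion_power_get() is a made-up name.

#include <linux/pm_runtime.h>

static int companion_power_get(struct device *dev)
{
#if defined(CONFIG_PM_RUNTIME)
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the failed get */
		return ret;
	}
#endif
	return 0;
}
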
Example #21
0
int otg_main_thread(void *data)
{
	struct dwc_otg2 *otg = (struct dwc_otg2 *)data;

	/* Allow the thread to be killed by a signal, but set the signal mask
	 * to block everything but INT, TERM, KILL, and USR1. */
	allow_signal(SIGINT);
	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	allow_signal(SIGUSR1);

	pm_runtime_get_sync(otg->dev);

	/* Allow the thread to be frozen */
	set_freezable();

	otg_dbg(otg, "Thread running\n");
	while (otg->state != DWC_STATE_TERMINATED) {
		int next = DWC_STATE_B_IDLE;

		otg_dbg(otg, "\n\n\nMain thread entering state\n");

		switch (otg->state) {
		case DWC_STATE_B_IDLE:
			otg_dbg(otg, "DWC_STATE_B_IDLE\n");
			next = do_connector_id_status(otg);
			break;
		case DWC_STATE_CHARGER_DETECTION:
			otg_dbg(otg, "DWC_STATE_CHARGER_DETECTION\n");
			next = do_charger_detection(otg);
			break;
		case DWC_STATE_WAIT_VBUS_FALL:
			otg_dbg(otg, "DWC_STATE_WAIT_VBUS_FALL\n");
			next = do_wait_vbus_fall(otg);
			break;
		case DWC_STATE_CHARGING:
			otg_dbg(otg, "DWC_STATE_CHARGING\n");
			next = do_charging(otg);
			break;
		case DWC_STATE_A_HOST:
			otg_dbg(otg, "DWC_STATE_A_HOST\n");
			next = do_a_host(otg);
			break;
		case DWC_STATE_B_PERIPHERAL:
			otg_dbg(otg, "DWC_STATE_B_PERIPHERAL\n");
			start_peripheral(otg);
			next = do_b_peripheral(otg);

			stop_peripheral(otg);
			break;
		case DWC_STATE_EXIT:
			otg_dbg(otg, "DWC_STATE_EXIT\n");
			next = DWC_STATE_TERMINATED;
			break;
		case DWC_STATE_INVALID:
			otg_dbg(otg, "DWC_STATE_INVALID!!!\n");
		default:
			otg_dbg(otg, "Unknown State %d, sleeping...\n",
					otg->state);
			sleep_main_thread(otg);
			break;
		}

		otg->prev = otg->state;
		otg->state = next;
	}

	pm_runtime_mark_last_busy(otg->dev);
	pm_runtime_put_autosuspend(otg->dev);
	otg->main_thread = NULL;
	otg_dbg(otg, "OTG main thread exiting....\n");

	return 0;
}
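
The thread holds a single runtime PM reference for its whole lifetime and hands it back through the autosuspend pair on exit. Reduced to its skeleton, with the loop body and device wiring left illustrative:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>

static int my_main_thread(void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* keep the device up while we run */
	set_freezable();

	while (!kthread_should_stop()) {
		/* ... run one state-machine step here ... */
		schedule_timeout_interruptible(HZ);
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* let the device idle off later */
	return 0;
}
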
Example #22
0
static int omap2430_musb_init(struct musb *musb)
{
	u32 l;
	int status = 0;
	struct device *dev = musb->controller;
	struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;

	/* We require some kind of external transceiver, hooked
	 * up through ULPI.  TWL4030-family PMICs include one,
	 * which needs a driver, drivers aren't always needed.
	 */
	if (dev->parent->of_node)
		musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent,
		    "usb-phy", 0);
	else
		musb->xceiv = devm_usb_get_phy_dev(dev, 0);

	if (IS_ERR(musb->xceiv)) {
		status = PTR_ERR(musb->xceiv);

		if (status == -ENXIO)
			return status;

		pr_err("HS USB OTG: no transceiver configured\n");
		return -EPROBE_DEFER;
	}

	musb->isr = omap2430_musb_interrupt;

	status = pm_runtime_get_sync(dev);
	if (status < 0) {
		dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
		goto err1;
	}

	l = musb_readl(musb->mregs, OTG_INTERFSEL);

	if (data->interface_type == MUSB_INTERFACE_UTMI) {
		/* OMAP4 uses Internal PHY GS70 which uses UTMI interface */
		l &= ~ULPI_12PIN;       /* Disable ULPI */
		l |= UTMI_8BIT;         /* Enable UTMI  */
	} else {
		l |= ULPI_12PIN;
	}

	musb_writel(musb->mregs, OTG_INTERFSEL, l);

	pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
			"sysstatus 0x%x, intrfsel 0x%x, simenable  0x%x\n",
			musb_readl(musb->mregs, OTG_REVISION),
			musb_readl(musb->mregs, OTG_SYSCONFIG),
			musb_readl(musb->mregs, OTG_SYSSTATUS),
			musb_readl(musb->mregs, OTG_INTERFSEL),
			musb_readl(musb->mregs, OTG_SIMENABLE));

	setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);

	if (glue->status != OMAP_MUSB_UNKNOWN)
		omap_musb_set_mailbox(glue);

	usb_phy_init(musb->xceiv);

	pm_runtime_put_noidle(musb->controller);
	return 0;

err1:
	return status;
}
Example #23
0
static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	struct omap_wdt_dev *wdev;
	int new_margin;
	static const struct watchdog_info ident = {
		.identity = "OMAP Watchdog",
		.options = WDIOF_SETTIMEOUT,
		.firmware_version = 0,
	};

	wdev = file->private_data;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user((struct watchdog_info __user *)arg, &ident,
				sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
		return put_user(0, (int __user *)arg);
	case WDIOC_GETBOOTSTATUS:
		if (cpu_is_omap16xx())
			return put_user(__raw_readw(ARM_SYSST),
					(int __user *)arg);
		if (cpu_is_omap24xx())
			return put_user(omap_prcm_get_reset_sources(),
					(int __user *)arg);
		/* fall through */
	case WDIOC_KEEPALIVE:
		pm_runtime_get_sync(wdev->dev);
		spin_lock(&wdt_lock);
		omap_wdt_ping(wdev);
		spin_unlock(&wdt_lock);
		pm_runtime_put_sync(wdev->dev);
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_margin, (int __user *)arg))
			return -EFAULT;
		omap_wdt_adjust_timeout(new_margin);

		pm_runtime_get_sync(wdev->dev);
		spin_lock(&wdt_lock);
		omap_wdt_disable(wdev);
		omap_wdt_set_timeout(wdev);
		omap_wdt_enable(wdev);

		omap_wdt_ping(wdev);
		spin_unlock(&wdt_lock);
		pm_runtime_put_sync(wdev->dev);
		/* fall through */
	case WDIOC_GETTIMEOUT:
		return put_user(timer_margin, (int __user *)arg);
	default:
		return -ENOTTY;
	}
}
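
Every hardware touch in the ioctl is bracketed by pm_runtime_get_sync()/pm_runtime_put_sync(); the _sync variant of the put powers the watchdog block back down before the ioctl returns instead of at some later idle point. The keepalive case reduces to a sketch like the following, which reuses the driver's own omap_wdt_ping() and wdt_lock and therefore only compiles in that context:

#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

static void wdt_keepalive(struct omap_wdt_dev *wdev)
{
	pm_runtime_get_sync(wdev->dev);
	spin_lock(&wdt_lock);
	omap_wdt_ping(wdev);		/* pet the dog while clocks are on */
	spin_unlock(&wdt_lock);
	pm_runtime_put_sync(wdev->dev);
}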

static const struct file_operations omap_wdt_fops = {
	.owner = THIS_MODULE,
	.write = omap_wdt_write,
	.unlocked_ioctl = omap_wdt_ioctl,
	.open = omap_wdt_open,
	.release = omap_wdt_release,
	.llseek = no_llseek,
};

static int __devinit omap_wdt_probe(struct platform_device *pdev)
{
	struct resource *res, *mem;
	struct omap_wdt_dev *wdev;
	int ret;

	/* reserve static register mappings */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENOENT;
		goto err_get_resource;
	}

	if (omap_wdt_dev) {
		ret = -EBUSY;
		goto err_busy;
	}

	mem = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!mem) {
		ret = -EBUSY;
		goto err_busy;
	}

	wdev = kzalloc(sizeof(struct omap_wdt_dev), GFP_KERNEL);
	if (!wdev) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	wdev->omap_wdt_users = 0;
	wdev->mem = mem;
	wdev->dev = &pdev->dev;

	wdev->base = ioremap(res->start, resource_size(res));
	if (!wdev->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	platform_set_drvdata(pdev, wdev);

	pm_runtime_enable(wdev->dev);
	pm_runtime_get_sync(wdev->dev);

	omap_wdt_disable(wdev);
	omap_wdt_adjust_timeout(timer_margin);

	wdev->omap_wdt_miscdev.parent = &pdev->dev;
	wdev->omap_wdt_miscdev.minor = WATCHDOG_MINOR;
	wdev->omap_wdt_miscdev.name = "watchdog";
	wdev->omap_wdt_miscdev.fops = &omap_wdt_fops;

	ret = misc_register(&(wdev->omap_wdt_miscdev));
	if (ret)
		goto err_misc;

	pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n",
		__raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
		timer_margin);

	pm_runtime_put_sync(wdev->dev);

	omap_wdt_dev = pdev;

	return 0;

err_misc:
	platform_set_drvdata(pdev, NULL);
	iounmap(wdev->base);

err_ioremap:
	wdev->base = NULL;
	kfree(wdev);

err_kzalloc:
	release_mem_region(res->start, resource_size(res));

err_busy:
err_get_resource:

	return ret;
}

static void omap_wdt_shutdown(struct platform_device *pdev)
{
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);

	if (wdev->omap_wdt_users) {
		pm_runtime_get_sync(wdev->dev);
		omap_wdt_disable(wdev);
		pm_runtime_put_sync(wdev->dev);
	}
}

static int __devexit omap_wdt_remove(struct platform_device *pdev)
{
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENOENT;

	misc_deregister(&(wdev->omap_wdt_miscdev));
	release_mem_region(res->start, resource_size(res));
	platform_set_drvdata(pdev, NULL);

	iounmap(wdev->base);

	kfree(wdev);
	omap_wdt_dev = NULL;

	return 0;
}

#ifdef	CONFIG_PM

/* REVISIT ... not clear this is the best way to handle system suspend; and
 * it's very inappropriate for selective device suspend (e.g. suspending this
 * through sysfs rather than by stopping the watchdog daemon).  Also, this
 * may not play well enough with NOWAYOUT...
 */

static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);

	if (wdev->omap_wdt_users) {
		pm_runtime_get_sync(wdev->dev);
		omap_wdt_disable(wdev);
		pm_runtime_put_sync(wdev->dev);
	}

	return 0;
}

static int omap_wdt_resume(struct platform_device *pdev)
{
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);

	if (wdev->omap_wdt_users) {
		pm_runtime_get_sync(wdev->dev);
		omap_wdt_enable(wdev);
		omap_wdt_ping(wdev);
		pm_runtime_put_sync(wdev->dev);
	}

	return 0;
}

#else
#define	omap_wdt_suspend	NULL
#define	omap_wdt_resume		NULL
#endif

static struct platform_driver omap_wdt_driver = {
	.probe		= omap_wdt_probe,
	.remove		= __devexit_p(omap_wdt_remove),
	.shutdown	= omap_wdt_shutdown,
	.suspend	= omap_wdt_suspend,
	.resume		= omap_wdt_resume,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "omap_wdt",
	},
};

static int __init omap_wdt_init(void)
{
	spin_lock_init(&wdt_lock);
	return platform_driver_register(&omap_wdt_driver);
}
Example #24
0
/**
 * exynos_drd_switch_start_host - helper function for starting/stopping the
 * host controller driver.
 *
 * @otg: Pointer to the usb_otg structure.
 * @on: start / stop the host controller driver.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int exynos_drd_switch_start_host(struct usb_otg *otg, int on)
{
	struct exynos_drd_switch *drd_switch = container_of(otg,
					struct exynos_drd_switch, otg);
	struct exynos_drd *drd = container_of(drd_switch->core,
						struct exynos_drd, core);
	struct device *xhci_dev = &drd->xhci->dev;
	int ret = 0;

	if (!drd->add_xhci_dev && drd->xhci) {
		ret = platform_device_add(drd->xhci);
		if (ret) {
			dev_err(drd->dev, "failed to register xhci device\n");
			return ret;
		}
		drd->add_xhci_dev = 1;
	}

	if (!otg->host) {
		dev_err(drd->dev, "failed binding host\n");
		return -EINVAL;
	}

	dev_dbg(otg->phy->dev, "Turn %s host %s\n",
			on ? "on" : "off", otg->host->bus_name);

	if (on) {
		wake_lock(&drd_switch->wakelock);
		/*
		 * Clear runtime_error flag. The flag could be
		 * set when user space accessed the host while DRD
		 * was in B-Dev mode.
		 */
		pm_runtime_disable(xhci_dev);
		if (pm_runtime_status_suspended(xhci_dev))
			pm_runtime_set_suspended(xhci_dev);
		else
			pm_runtime_set_active(xhci_dev);
		pm_runtime_enable(xhci_dev);

		ret = pm_runtime_get_sync(xhci_dev);
		if (ret < 0 && ret != -EINPROGRESS) {
			pm_runtime_put_noidle(xhci_dev);
			goto err;
		}

		exynos_drd_switch_ases_vbus_ctrl(drd_switch, 1);
	} else {
		exynos_drd_switch_ases_vbus_ctrl(drd_switch, 0);

		ret = pm_runtime_put_sync(xhci_dev);
		if (ret == -EAGAIN)
			pm_runtime_get_noresume(xhci_dev);
		else
			wake_unlock(&drd_switch->wakelock);
	}

err:
	/* ret can be 1 after pm_runtime_get_sync */
	return (ret < 0) ? ret : 0;
}
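
The disable/set_suspended-or-active/enable sequence above is the standard way to rewrite the runtime PM core's view of a device: the status may only be forced while runtime PM callbacks are disabled. As a standalone sketch:

#include <linux/pm_runtime.h>

/*
 * Sketch: clear a stale runtime PM state (such as runtime_error) by
 * re-declaring the device's current power state to the PM core.
 */
static void reset_rpm_status(struct device *dev)
{
	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		pm_runtime_set_suspended(dev);
	else
		pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
}
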
Example #25
0
PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
{
#if !defined(NO_HARDWARE)
	SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;

	if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
	{
		return PVRSRV_OK;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));

#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) && defined(CONFIG_PM_RUNTIME)
	{
		
		int res = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res));
			return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
		}
	}
#endif

	//disable_clock(MT65XX_PDN_MM_MFG_HALF, "MFG");
	//disable_clock(MT65XX_PDN_MM_MFG, "MFG");
	//disable_clock(MT65XX_PDN_MM_G3D, "MFG");

#if USE_SYS_CLOCK
    #if !defined(MTK_CLK_CTRL)
	DRV_WriteReg16(PLL_CON11, DRV_Reg16(PLL_CON11)|0x0C00); // F3D_CK_SEL
    #endif
	DRV_WriteReg32(MMSYS2_CONFG_BASE + 0x0504, 0x4); // MFG_CORE_CK_SEL DCM
#else
    #if !defined(MTK_CLK_CTRL)
	DRV_WriteReg16(PLL_CON11, DRV_Reg16(PLL_CON11)|0x0C00); // F3D_CK_SEL
    #endif
	DRV_WriteReg32(MMSYS2_CONFG_BASE + 0x0504, 0x6); // MFG_CORE_CK_SEL DCM
#endif

	//DRV_WriteReg32(MMSYS2_CONFG_BASE+0x400, DRV_Reg32(MMSYS2_CONFG_BASE+0x400)|0x4);
	enable_clock(MT65XX_PDN_MM_G3D, "MFG");
	enable_clock(MT65XX_PDN_MM_MFG, "MFG");
	enable_clock(MT65XX_PDN_MM_MFG_HALF, "MFG");
	DRV_WriteReg32(MMSYS2_CONFG_BASE+0x400, DRV_Reg32(MMSYS2_CONFG_BASE+0x400)|0x4);

#if defined(MTK_USE_GDC)
	SysInitGDC();
#endif

#if 0
	DRV_WriteReg16(0xF0007404, 0x6);
	DRV_WriteReg16(0xF0007400, 0x4800);
	DRV_WriteReg16(0xF0007400, 0x0800);
	DRV_WriteReg16(0xF0007400, 0x8800);
	while(DRV_Reg16(0xF0007404) & 0x8000){}
	PVRSRVReleasePrintf("MainPLL: 0x%x", DRV_Reg16(0xF0007410));

	DRV_WriteReg16(0xF0007404, 0x15);
	DRV_WriteReg16(0xF0007400, 0x4800);
	DRV_WriteReg16(0xF0007400, 0x0800);
	DRV_WriteReg16(0xF0007400, 0x8800);
	while(DRV_Reg16(0xF0007404) & 0x8000){}
	PVRSRVReleasePrintf("MEMPLL: 0x%x", DRV_Reg16(0xF0007410));
#endif

	SysEnableSGXInterrupts(psSysData);

	atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);

#else
	PVR_UNREFERENCED_PARAMETER(psSysData);
#endif
	return PVRSRV_OK;
}
Example #26
0
static int mxsfb_load(struct drm_device *drm, unsigned long flags)
{
	struct platform_device *pdev = to_platform_device(drm->dev);
	struct mxsfb_drm_private *mxsfb;
	struct resource *res;
	int ret;

	mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
	if (!mxsfb)
		return -ENOMEM;

	drm->dev_private = mxsfb;
	mxsfb->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mxsfb->base = devm_ioremap_resource(drm->dev, res);
	if (IS_ERR(mxsfb->base))
		return PTR_ERR(mxsfb->base);

	mxsfb->clk = devm_clk_get(drm->dev, NULL);
	if (IS_ERR(mxsfb->clk))
		return PTR_ERR(mxsfb->clk);

	mxsfb->clk_axi = devm_clk_get(drm->dev, "axi");
	if (IS_ERR(mxsfb->clk_axi))
		mxsfb->clk_axi = NULL;

	mxsfb->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
	if (IS_ERR(mxsfb->clk_disp_axi))
		mxsfb->clk_disp_axi = NULL;

	ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	pm_runtime_enable(drm->dev);

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to initialise vblank\n");
		goto err_vblank;
	}

	/* Modeset init */
	drm_mode_config_init(drm);

	ret = mxsfb_create_output(drm);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to create outputs\n");
		goto err_vblank;
	}

	ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
			mxsfb_formats, ARRAY_SIZE(mxsfb_formats),
			&mxsfb->connector);
	if (ret < 0) {
		dev_err(drm->dev, "Cannot setup simple display pipe\n");
		goto err_vblank;
	}

	ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
	if (ret) {
		dev_err(drm->dev, "Cannot connect panel\n");
		goto err_vblank;
	}

	drm->mode_config.min_width	= MXSFB_MIN_XRES;
	drm->mode_config.min_height	= MXSFB_MIN_YRES;
	drm->mode_config.max_width	= MXSFB_MAX_XRES;
	drm->mode_config.max_height	= MXSFB_MAX_YRES;
	drm->mode_config.funcs		= &mxsfb_mode_config_funcs;

	drm_mode_config_reset(drm);

	pm_runtime_get_sync(drm->dev);
	ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
	pm_runtime_put_sync(drm->dev);

	if (ret < 0) {
		dev_err(drm->dev, "Failed to install IRQ handler\n");
		goto err_irq;
	}

	drm_kms_helper_poll_init(drm);

	mxsfb->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
					  drm->mode_config.num_connector);
	if (IS_ERR(mxsfb->fbdev)) {
		ret = PTR_ERR(mxsfb->fbdev);
		mxsfb->fbdev = NULL;
		dev_err(drm->dev, "Failed to init FB CMA area\n");
		goto err_cma;
	}

	platform_set_drvdata(pdev, drm);

	drm_helper_hpd_irq_event(drm);

	return 0;

err_cma:
	drm_irq_uninstall(drm);
err_irq:
	drm_panel_detach(mxsfb->panel);
err_vblank:
	pm_runtime_disable(drm->dev);

	return ret;
}
Example #27
0
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get(gmu->gxpd);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}
Example #28
0
static int dwc3_probe(struct platform_device *pdev)
{
	struct device		*dev = &pdev->dev;
	struct dwc3_platform_data *pdata = dev_get_platdata(dev);
	struct device_node	*node = dev->of_node;
	struct resource		*res;
	struct dwc3		*dwc;

	int			ret = -ENOMEM;

	void __iomem		*regs;
	void			*mem;

	mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
	if (!mem) {
		dev_err(dev, "not enough memory\n");
		return -ENOMEM;
	}
	dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
	dwc->mem = mem;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "missing IRQ\n");
		return -ENODEV;
	}
	dwc->xhci_resources[1].start = res->start;
	dwc->xhci_resources[1].end = res->end;
	dwc->xhci_resources[1].flags = res->flags;
	dwc->xhci_resources[1].name = res->name;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	if (node) {
		dwc->maximum_speed = of_usb_get_maximum_speed(node);

		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);

		dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
		dwc->dr_mode = of_usb_get_dr_mode(node);
	} else if (pdata) {
		dwc->maximum_speed = pdata->maximum_speed;

		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);

		dwc->needs_fifo_resize = pdata->tx_fifo_resize;
		dwc->dr_mode = pdata->dr_mode;
	} else {
		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
	}

	/* default to superspeed if no maximum_speed passed */
	if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
		dwc->maximum_speed = USB_SPEED_SUPER;

	if (IS_ERR(dwc->usb2_phy)) {
		ret = PTR_ERR(dwc->usb2_phy);
		if (ret == -ENXIO || ret == -ENODEV) {
			dwc->usb2_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb2 phy configured\n");
			return ret;
		}
	}

	if (IS_ERR(dwc->usb3_phy)) {
		ret = PTR_ERR(dwc->usb3_phy);
		if (ret == -ENXIO || ret == -ENODEV) {
			dwc->usb3_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb3 phy configured\n");
			return ret;
		}
	}

	dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(dwc->usb2_generic_phy)) {
		ret = PTR_ERR(dwc->usb2_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV) {
			dwc->usb2_generic_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb2 phy configured\n");
			return ret;
		}
	}

	dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
	if (IS_ERR(dwc->usb3_generic_phy)) {
		ret = PTR_ERR(dwc->usb3_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV) {
			dwc->usb3_generic_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb3 phy configured\n");
			return ret;
		}
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	res->start += DWC3_GLOBALS_REGS_START;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	spin_lock_init(&dwc->lock);
	platform_set_drvdata(pdev, dwc);

	dwc->regs	= regs;
	dwc->regs_size	= resource_size(res);
	dwc->dev	= dev;

	dev->dma_mask	= dev->parent->dma_mask;
	dev->dma_parms	= dev->parent->dma_parms;
	dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);

	dwc3_cache_hwparams(dwc);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err0;
	}

	if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
		dwc->dr_mode = USB_DR_MODE_HOST;
	else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
		dwc->dr_mode = USB_DR_MODE_PERIPHERAL;

	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize core\n");
		goto err0;
	}

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);
	ret = phy_power_on(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err1;

	ret = phy_power_on(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err_usb2phy_power;

	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err_usb3phy_power;
	}

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		ret = dwc3_gadget_init(dwc);
		if (ret) {
			dev_err(dev, "failed to initialize gadget\n");
			goto err2;
		}
		break;
	case USB_DR_MODE_HOST:
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dev, "failed to initialize host\n");
			goto err2;
		}
		break;
	case USB_DR_MODE_OTG:
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dev, "failed to initialize host\n");
			goto err2;
		}

		ret = dwc3_gadget_init(dwc);
		if (ret) {
			dev_err(dev, "failed to initialize gadget\n");
			goto err2;
		}
		break;
	default:
		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
		ret = -EINVAL;
		goto err2;
	}

	ret = dwc3_debugfs_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize debugfs\n");
		goto err3;
	}

	pm_runtime_allow(dev);

	return 0;

err3:
	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		dwc3_gadget_exit(dwc);
		break;
	case USB_DR_MODE_HOST:
		dwc3_host_exit(dwc);
		break;
	case USB_DR_MODE_OTG:
		dwc3_host_exit(dwc);
		dwc3_gadget_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

err2:
	dwc3_event_buffers_cleanup(dwc);

err_usb3phy_power:
	phy_power_off(dwc->usb3_generic_phy);

err_usb2phy_power:
	phy_power_off(dwc->usb2_generic_phy);

err1:
	usb_phy_set_suspend(dwc->usb2_phy, 1);
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	dwc3_core_exit(dwc);

err0:
	dwc3_free_event_buffers(dwc);

	return ret;
}
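
Judging by the probe path above, dwc3 keeps the reference from pm_runtime_get_sync() for the device's whole bound lifetime and uses forbid/allow to keep runtime suspend off until initialisation completes. The choreography as a sketch with error handling added; my_probe_pm_setup() is a made-up wrapper.

#include <linux/pm_runtime.h>

static int my_probe_pm_setup(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);	/* device stays active from here on */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}
	pm_runtime_forbid(dev);		/* no runtime suspend during init */

	/* ... hardware initialisation would go here ... */

	pm_runtime_allow(dev);		/* init done, suspend permitted again */
	return 0;
}
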
Example #29
0
int s5p_mfc_power_on(void)
{
	atomic_set(&pm->power, 1);

	return pm_runtime_get_sync(pm->device);
}
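
Returning pm_runtime_get_sync() straight through, as above, carries a subtlety: the call returns 1 when the device was already active, so callers must test for ret < 0 rather than ret != 0. A defensive sketch that normalises the result; my_power_on() is a made-up name.

#include <linux/pm_runtime.h>

static int my_power_on(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the failed get */
		return ret;
	}
	return 0;	/* fold "1 == already active" into success */
}
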
Example #30
0
static ssize_t panel_scan_debugfs_read(struct file *filp, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
	struct cyttsp5_device_access_data *dad = data->dad;
	struct device *dev = dad->dev;
	int status = STATUS_FAIL;
	u8 config;
	u16 actual_read_len;
	int length = 0;
	u8 element_size = 0;
	u8 *buf_offset;
	int elem_offset = 0;
	int rc;

	if (*ppos)
		goto exit;

	mutex_lock(&dad->sysfs_lock);

	pm_runtime_get_sync(dev);

	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
	if (rc < 0) {
		dev_err(dev, "%s: Error on request exclusive r=%d\n",
				__func__, rc);
		goto put_pm_runtime;
	}

	rc = cyttsp5_suspend_scan_cmd_(dev);
	if (rc < 0) {
		dev_err(dev, "%s: Error on suspend scan r=%d\n",
				__func__, rc);
		goto release_exclusive;
	}

	rc = cyttsp5_exec_scan_cmd_(dev);
	if (rc < 0) {
		dev_err(dev, "%s: Error on execute panel scan r=%d\n",
				__func__, rc);
		goto resume_scan;
	}

	/* Set length to max to read all */
	rc = cyttsp5_ret_scan_data_cmd_(dev, 0, 0xFFFF,
			dad->panel_scan_data_id, dad->ic_buf, &config,
			&actual_read_len, NULL);
	if (rc < 0) {
		dev_err(dev, "%s: Error on retrieve panel scan r=%d\n",
				__func__, rc);
		goto resume_scan;
	}

	length = get_unaligned_le16(&dad->ic_buf[0]);
	buf_offset = dad->ic_buf + length;
	element_size = config & 0x07;
	elem_offset = actual_read_len;
	while (actual_read_len > 0) {
		rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, 0xFFFF,
				dad->panel_scan_data_id, NULL, &config,
				&actual_read_len, buf_offset);
		if (rc < 0)
			goto resume_scan;

		length += actual_read_len * element_size;
		buf_offset = dad->ic_buf + length;
		elem_offset += actual_read_len;
	}
	/* Reconstruct cmd header */
	put_unaligned_le16(length, &dad->ic_buf[0]);
	put_unaligned_le16(elem_offset, &dad->ic_buf[7]);

	/* Do not print command header */
	length -= 5;

	status = STATUS_SUCCESS;

resume_scan:
	cyttsp5_resume_scan_cmd_(dev);

release_exclusive:
	cmd->release_exclusive(dev);

put_pm_runtime:
	pm_runtime_put(dev);

	if (status == STATUS_FAIL)
		length = 0;

	data->pr_buf_len = prepare_print_buffer(status, &dad->ic_buf[5],
			length, data->pr_buf, sizeof(data->pr_buf));

	mutex_unlock(&dad->sysfs_lock);

exit:
	return simple_read_from_buffer(buf, count, ppos, data->pr_buf,
			data->pr_buf_len);
}
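
As in the other cyttsp5 debugfs readers, the *ppos test at the top makes the expensive scan run only on the first read(); later reads fall straight through to simple_read_from_buffer(), which returns 0 at EOF. The shape of that one-shot pattern, with struct my_data and my_collect() as placeholders:

#include <linux/fs.h>
#include <linux/uaccess.h>

struct my_data {
	char buf[256];
	size_t len;
};

extern size_t my_collect(char *buf, size_t size);	/* hypothetical, expensive */

static ssize_t one_shot_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct my_data *d = filp->private_data;

	if (!*ppos)	/* first read only: generate the payload */
		d->len = my_collect(d->buf, sizeof(d->buf));

	return simple_read_from_buffer(ubuf, count, ppos, d->buf, d->len);
}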