Example #1
0
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
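	/*
	 * The TD must already be unlinked from both the queue list and the
	 * frame list before it is returned to the DMA pool.
	 */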
	if (!list_empty(&td->list))
		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
Example #2
0
/**
 * i2s_read_done - callback from the _dma tasklet_ after read
 * @arg : void pointer that should be the driver data (context)
 *
 * Output parameters
 *      none
 */
static void i2s_read_done(void *arg)
{
	int status = 0;

	struct intel_mid_i2s_hdl *drv_data = arg;
	void *param_complete;
	void __iomem *reg;

	WARN(!drv_data, "Driver data=NULL\n");
	if (!drv_data)
		return;
	if (!test_bit(I2S_PORT_READ_BUSY, &drv_data->flags))
		dev_WARN(&drv_data->pdev->dev, "spurious read dma complete");

	dma_unmap_single(NULL, drv_data->read_dst,
			 drv_data->read_len, DMA_FROM_DEVICE);
	drv_data->read_len = 0;
	reg = drv_data->ioaddr;
	/* Rx fifo overrun Interrupt */
	change_SSCR0_reg(reg, RIM, SSP_RX_FIFO_OVER_INT_DISABLE);
	param_complete = drv_data->read_param;
	/* Do not change this ordering:
	 * clear READ_BUSY, wake up the close() waiter, then test PORT_CLOSING.
	 */
	clear_bit(I2S_PORT_READ_BUSY, &drv_data->flags);
	wake_up(&drv_data->wq_chan_closing);
	if (test_bit(I2S_PORT_CLOSING, &drv_data->flags))
		return;
	if (drv_data->read_callback != NULL)
		status = drv_data->read_callback(param_complete);
	else
		dev_warn(&drv_data->pdev->dev, "RD done but no callback set");

}
Example #3
0
static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
	enum omap_dwc3_vbus_id_status status)
{
	int	ret;
	u32	val;

	switch (status) {
	case OMAP_DWC3_ID_GROUND:
		if (omap->vbus_reg) {
			ret = regulator_enable(omap->vbus_reg);
			if (ret) {
				dev_err(omap->dev, "regulator enable failed\n");
				return;
			}
		}

		val = dwc3_omap_read_utmi_ctrl(omap);
		val &= ~(USBOTGSS_UTMI_OTG_CTRL_IDDIG
				| USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
				| USBOTGSS_UTMI_OTG_CTRL_SESSEND);
		val |= USBOTGSS_UTMI_OTG_CTRL_SESSVALID
				| USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
		dwc3_omap_write_utmi_ctrl(omap, val);
		break;

	case OMAP_DWC3_VBUS_VALID:
		val = dwc3_omap_read_utmi_ctrl(omap);
		val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
		val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG
				| USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
				| USBOTGSS_UTMI_OTG_CTRL_SESSVALID
				| USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
		dwc3_omap_write_utmi_ctrl(omap, val);
		break;

	case OMAP_DWC3_ID_FLOAT:
		if (omap->vbus_reg)
			regulator_disable(omap->vbus_reg);

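		/* fall through */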
	case OMAP_DWC3_VBUS_OFF:
		val = dwc3_omap_read_utmi_ctrl(omap);
		val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
				| USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
				| USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT);
		val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND
				| USBOTGSS_UTMI_OTG_CTRL_IDDIG;
		dwc3_omap_write_utmi_ctrl(omap, val);
		break;

	default:
		dev_WARN(omap->dev, "invalid state\n");
	}
}
Example #4
0
/* Wait until @bit of @is->state is set to @state in the interrupt handler. */
int fimc_is_wait_event(struct fimc_is *is, unsigned long bit,
		       unsigned int state, unsigned int timeout)
{

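	/*
	 * The wait condition becomes true once the bit matches the requested
	 * @state: a non-zero @state waits for the bit to be set, a zero
	 * @state waits for it to be cleared.
	 */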
	int ret = wait_event_timeout(is->irq_queue,
				     !state ^ test_bit(bit, &is->state),
				     timeout);
	if (ret == 0) {
		dev_WARN(&is->pdev->dev, "%s() timed out\n", __func__);
		return -ETIME;
	}
	return 0;
}
Example #5
0
static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct gb_module *module = NULL;
	struct gb_interface *intf = NULL;
	struct gb_bundle *bundle = NULL;
	struct gb_connection *connection = NULL;

	if (is_gb_endo(dev)) {
		/*
		 * Not much to do for an endo, just fall through, as the
		 * "default" attributes are good enough for us.
		 */
		return 0;
	}

	if (is_gb_module(dev)) {
		module = to_gb_module(dev);
	} else if (is_gb_interface(dev)) {
		intf = to_gb_interface(dev);
	} else if (is_gb_bundle(dev)) {
		bundle = to_gb_bundle(dev);
		intf = bundle->intf;
	} else if (is_gb_connection(dev)) {
		connection = to_gb_connection(dev);
		bundle = connection->bundle;
		intf = bundle->intf;
	} else {
		dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
		return -EINVAL;
	}

	if (connection) {
		// FIXME
		// add a uevent that can "load" a connection type
		return 0;
	}

	if (bundle) {
		// FIXME
		// add a uevent that can "load" a bundle type
		// This is what we need to bind a driver to so use the info
		// in gmod here as well
		return 0;
	}

	// FIXME
	// "just" a module, be vague here, nothing binds to a module except
	// the greybus core, so there's not much, if anything, we need to
	// advertise.
	return 0;
}
Example #6
0
static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
				       struct tegra_plane *plane)
{
	struct device *dev = dc->dev;

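	/*
	 * When the owner register reports that this head owns the window,
	 * the software state (plane->dc) is expected to agree.
	 */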
	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
		if (plane->dc == dc)
			return true;

		dev_WARN(dev, "head %u owns window %u but is not attached\n",
			 dc->pipe, plane->index);
	}

	return false;
}
Example #7
0
/**
 * intel_mid_i2s_set_rd_cb - set the callback function after read is done
 * @drv_data : handle of corresponding ssp i2s (given by i2s_open function)
 * @read_callback : pointer of callback function
 *
 * Output parameters
 *      error : 0 means no error
 */
int intel_mid_i2s_set_rd_cb(struct intel_mid_i2s_hdl *drv_data,
					int (*read_callback)(void *param))
{
	WARN(!drv_data, "Driver data=NULL\n");
	if (!drv_data)
		return -EFAULT;
	mutex_lock(&drv_data->mutex);
	if (!test_bit(I2S_PORT_OPENED, &drv_data->flags)) {
		dev_WARN(&drv_data->pdev->dev, "set RD CB I2S_PORT NOT_OPENED");
		mutex_unlock(&drv_data->mutex);
		return -EPERM;
	}
	/* Do not change read parameters in the middle of a READ request */
	if (test_bit(I2S_PORT_READ_BUSY, &drv_data->flags)) {
		dev_WARN(&drv_data->pdev->dev, "CB reject I2S_PORT_READ_BUSY");
		mutex_unlock(&drv_data->mutex);
		return -EBUSY;
	}
	drv_data->read_callback = read_callback;
	drv_data->read_len = 0;
	mutex_unlock(&drv_data->mutex);
	return 0;

}
Example #8
0
static int ucsi_acpi_remove(struct platform_device *pdev)
{
	struct ucsi *ucsi = platform_get_drvdata(pdev);

	acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
				   ACPI_ALL_NOTIFY, ucsi_acpi_notify);

	/* Make sure there are no events in the middle of being processed */
	if (wait_on_bit_timeout(&ucsi->flags, EVENT_PENDING,
				TASK_UNINTERRUPTIBLE,
				msecs_to_jiffies(UCSI_TIMEOUT_MS)))
		dev_WARN(ucsi->dev, "%s: Events still pending\n", __func__);

	ucsi_reset_ppm(ucsi);
	return 0;
}
Example #9
0
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;
	int			ret = 0;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->epnum		= dep->number;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned	direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		ret = dwc3_ep0_start_trans(dwc, direction,
				req->request.dma, req->request.length,
				DWC3_TRBCTL_CONTROL_DATA);
		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);
	} else if (dwc->delayed_status) {
		dwc->delayed_status = false;

		if (dwc->ep0state == EP0_STATUS_PHASE)
			dwc3_ep0_do_control_status(dwc, 1);
		else
			dev_dbg(dwc->dev, "too early for delayed status\n");
	}

	return ret;
}
Example #10
0
static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
				    enum dma_transfer_direction direction)
{
	struct tegra_adma *tdma = tdc->tdma;
	unsigned int sreq_index = tdc->sreq_index;

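	/*
	 * A channel holds at most one slave request; once reserved it may
	 * only be reused for the same transfer direction.
	 */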
	if (tdc->sreq_reserved)
		return tdc->sreq_dir == direction ? 0 : -EINVAL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) {
			dev_err(tdma->dev, "invalid DMA request\n");
			return -EINVAL;
		}

		if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	case DMA_DEV_TO_MEM:
		if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) {
			dev_err(tdma->dev, "invalid DMA request\n");
			return -EINVAL;
		}

		if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return -EINVAL;
	}

	tdc->sreq_dir = direction;
	tdc->sreq_reserved = true;

	return 0;
}
Example #11
0
static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;

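	/* A live reference at teardown means the mapping is still in use. */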
	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	arch_remove_memory(align_start, align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}
Example #12
0
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road
		 */
		dev_WARN(dev, "Parentless device registered during a PM transaction\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
Example #13
0
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
					      unsigned int offset)
{
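	/*
	 * Map the 0x500, 0x700 and 0x800 register ranges onto offsets within
	 * this plane's own register window.
	 */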
	if (offset >= 0x500 && offset <= 0x581) {
		offset = 0x000 + (offset - 0x500);
		return plane->offset + offset;
	}

	if (offset >= 0x700 && offset <= 0x73c) {
		offset = 0x180 + (offset - 0x700);
		return plane->offset + offset;
	}

	if (offset >= 0x800 && offset <= 0x83e) {
		offset = 0x1c0 + (offset - 0x800);
		return plane->offset + offset;
	}

	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);

	return plane->offset + offset;
}
Example #14
0
static int __sunxi_gadget_ep0_queue(struct sunxi_otgc_ep *dep,
		struct sunxi_otgc_request *req)
{
	struct sunxi_otgc		*otgc = dep->otgc;
	int			ret = 0;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->epnum		= dep->number;

	list_add_tail(&req->list, &dep->request_list);

	if (dep->flags & SUNXI_EP_PENDING_REQUEST) {
		unsigned	direction;

		direction = !!(dep->flags & SUNXI_EP0_DIR_IN);

		if (otgc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(otgc->dev, "Unexpected pending request\n");
			return 0;
		}

		ret = sunxi_ep0_start_trans(otgc, direction,
				req->request.dma, req->request.length,
				SUNXI_TRBCTL_CONTROL_DATA);
		dep->flags &= ~(SUNXI_EP_PENDING_REQUEST |
				SUNXI_EP0_DIR_IN);
	} else if (otgc->delayed_status) {
		otgc->delayed_status = false;

		if (otgc->ep0state == EP0_STATUS_PHASE)
			sunxi_ep0_do_control_status(otgc, 1);
		else
			dev_dbg(otgc->dev, "too early for delayed status\n");
	}

	return ret;
}
Example #15
0
static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
{
	u32			reg;
	struct device_node	*node = omap->dev->of_node;
	int			utmi_mode = 0;

	reg = dwc3_omap_read_utmi_ctrl(omap);

	of_property_read_u32(node, "utmi-mode", &utmi_mode);

	switch (utmi_mode) {
	case DWC3_OMAP_UTMI_MODE_SW:
		reg |= USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
		break;
	case DWC3_OMAP_UTMI_MODE_HW:
		reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
		break;
	default:
		dev_WARN(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
	}

	dwc3_omap_write_utmi_ctrl(omap, reg);
}
Example #16
0
static void tegra_adma_request_free(struct tegra_adma_chan *tdc)
{
	struct tegra_adma *tdma = tdc->tdma;

	if (!tdc->sreq_reserved)
		return;

	switch (tdc->sreq_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved);
		break;

	case DMA_DEV_TO_MEM:
		clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved);
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return;
	}

	tdc->sreq_reserved = false;
}
int inv_mpu_register_slave(struct module *slave_module,
			struct i2c_client *slave_client,
			struct ext_slave_platform_data *slave_pdata,
			struct ext_slave_descr *(*get_slave_descr)(void))
{
	struct mpu_private_data *mpu = mpu_private_data;
	struct mldl_cfg *mldl_cfg;
	struct ext_slave_descr *slave_descr;
	struct ext_slave_platform_data **pdata_slave;
	char *irq_name = NULL;
	int result = 0;

	if (!slave_client || !slave_pdata || !get_slave_descr)
		return -EINVAL;

	if (!mpu) {
		dev_err(&slave_client->adapter->dev,
			"%s: Null mpu_private_data\n", __func__);
		return -EINVAL;
	}
	mldl_cfg    = &mpu->mldl_cfg;
	pdata_slave = mldl_cfg->pdata_slave;
	slave_descr = get_slave_descr();

	if (!slave_descr) {
		dev_err(&slave_client->adapter->dev,
			"%s: Null ext_slave_descr\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&mpu->mutex);
	if (mpu->pid) {
		mutex_unlock(&mpu->mutex);
		return -EBUSY;
	}

	if (pdata_slave[slave_descr->type]) {
		result = -EBUSY;
		goto out_unlock_mutex;
	}

	slave_pdata->address	= slave_client->addr;
	slave_pdata->irq	= slave_client->irq;
	slave_pdata->adapt_num	= i2c_adapter_id(slave_client->adapter);

	dev_info(&slave_client->adapter->dev,
		"%s: +%s Type %d: Addr: %2x IRQ: %2d, Adapt: %2d\n",
		__func__,
		slave_descr->name,
		slave_descr->type,
		slave_pdata->address,
		slave_pdata->irq,
		slave_pdata->adapt_num);

	switch (slave_descr->type) {
	case EXT_SLAVE_TYPE_ACCEL:
		irq_name = "accelirq";
		break;
	case EXT_SLAVE_TYPE_COMPASS:
		irq_name = "compassirq";
		break;
	case EXT_SLAVE_TYPE_PRESSURE:
		irq_name = "pressureirq";
		break;
	default:
		irq_name = "none";
	}
	if (slave_descr->init) {
		result = slave_descr->init(slave_client->adapter,
					slave_descr,
					slave_pdata);
		if (result) {
			dev_err(&slave_client->adapter->dev,
				"%s init failed %d\n",
				slave_descr->name, result);
			goto out_unlock_mutex;
		}
	}

	pdata_slave[slave_descr->type] = slave_pdata;
	mpu->slave_modules[slave_descr->type] = slave_module;
	mldl_cfg->slave[slave_descr->type] = slave_descr;

	goto out_unlock_mutex;

out_unlock_mutex:
	mutex_unlock(&mpu->mutex);

	if (!result && irq_name && (slave_pdata->irq > 0)) {
		int warn_result;
		dev_info(&slave_client->adapter->dev,
			"Installing %s irq using %d\n",
			irq_name,
			slave_pdata->irq);
		warn_result = slaveirq_init(slave_client->adapter,
					slave_pdata, irq_name);
		if (warn_result)
			dev_WARN(&slave_client->adapter->dev,
				"%s irq assigned error: %d\n",
				slave_descr->name, warn_result);
	} else {
		dev_info(&slave_client->adapter->dev,
			"%s irq not assigned: %d %d %d\n",
			slave_descr->name,
			result, (int)irq_name, slave_pdata->irq);
	}

	return result;
}
int mpu_probe(struct i2c_client *client, const struct i2c_device_id *devid)
{
	struct mpu_platform_data *pdata;
	struct mpu_private_data *mpu;
	struct mldl_cfg *mldl_cfg;
	int res = 0;
	int ii = 0;
	unsigned long irq_flags;

	dev_info(&client->adapter->dev, "%s: %d\n", __func__, ii++);

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		res = -ENODEV;
		goto out_check_functionality_failed;
	}

	mpu = kzalloc(sizeof(struct mpu_private_data), GFP_KERNEL);
	if (!mpu) {
		res = -ENOMEM;
		goto out_alloc_data_failed;
	}
	mldl_cfg = &mpu->mldl_cfg;
	mldl_cfg->mpu_ram	= &mpu->mpu_ram;
	mldl_cfg->mpu_gyro_cfg	= &mpu->mpu_gyro_cfg;
	mldl_cfg->mpu_offsets	= &mpu->mpu_offsets;
	mldl_cfg->mpu_chip_info	= &mpu->mpu_chip_info;
	mldl_cfg->inv_mpu_cfg	= &mpu->inv_mpu_cfg;
	mldl_cfg->inv_mpu_state	= &mpu->inv_mpu_state;

	mldl_cfg->mpu_ram->length = MPU_MEM_NUM_RAM_BANKS * MPU_MEM_BANK_SIZE;
	mldl_cfg->mpu_ram->ram = kzalloc(mldl_cfg->mpu_ram->length, GFP_KERNEL);
	if (!mldl_cfg->mpu_ram->ram) {
		res = -ENOMEM;
		goto out_alloc_ram_failed;
	}
	mpu_private_data = mpu;
	i2c_set_clientdata(client, mpu);
	mpu->client = client;

	init_waitqueue_head(&mpu->mpu_event_wait);
	mutex_init(&mpu->mutex);
	init_completion(&mpu->completion);

	mpu->response_timeout = 60;	/* Seconds */
	mpu->timeout.function = mpu_pm_timeout;
	mpu->timeout.data = (u_long) mpu;
	init_timer(&mpu->timeout);

	mpu->nb.notifier_call = mpu_pm_notifier_callback;
	mpu->nb.priority = 0;
	res = register_pm_notifier(&mpu->nb);
	if (res) {
		dev_err(&client->adapter->dev,
			"Unable to register pm_notifier %d\n", res);
		goto out_register_pm_notifier_failed;
	}

	pdata = (struct mpu_platform_data *)client->dev.platform_data;
	if (!pdata) {
		dev_WARN(&client->adapter->dev,
			 "Missing platform data for mpu\n");
	}
	mldl_cfg->pdata = pdata;

	mldl_cfg->mpu_chip_info->addr = client->addr;
	res = inv_mpu_open(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL);

	if (res) {
		dev_err(&client->adapter->dev,
			"Unable to open %s %d\n", MPU_NAME, res);
		res = -ENODEV;
		goto out_whoami_failed;
	}

	mpu->dev.minor = MISC_DYNAMIC_MINOR;
	mpu->dev.name = "mpu";
	mpu->dev.fops = &mpu_fops;
	res = misc_register(&mpu->dev);
	if (res < 0) {
		dev_err(&client->adapter->dev,
			"ERROR: misc_register returned %d\n", res);
		goto out_misc_register_failed;
	}

	if (client->irq) {
		dev_info(&client->adapter->dev,
			 "Installing irq using %d\n", client->irq);
		if (BIT_ACTL_LOW == ((mldl_cfg->pdata->int_config) & BIT_ACTL))
			irq_flags = IRQF_TRIGGER_FALLING;
		else
			irq_flags = IRQF_TRIGGER_RISING;
		res = mpuirq_init(client, mldl_cfg, irq_flags);

		if (res)
			goto out_mpuirq_failed;
	} else {
		dev_WARN(&client->adapter->dev,
			 "Missing %s IRQ\n", MPU_NAME);
	}
	return res;

out_mpuirq_failed:
	misc_deregister(&mpu->dev);
out_misc_register_failed:
	inv_mpu_close(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL);
out_whoami_failed:
	unregister_pm_notifier(&mpu->nb);
out_register_pm_notifier_failed:
	kfree(mldl_cfg->mpu_ram->ram);
	mpu_private_data = NULL;
out_alloc_ram_failed:
	kfree(mpu);
out_alloc_data_failed:
out_check_functionality_failed:
	dev_err(&client->adapter->dev, "%s failed %d\n", __func__, res);
	return res;

}
Example #19
0
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->epnum		= dep->number;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned	direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);

		return 0;
	}

	/*
	 * In case gadget driver asked us to delay the STATUS phase,
	 * handle it here.
	 */
	if (dwc->delayed_status) {
		unsigned	direction;

		direction = !dwc->ep0_expect_in;
		dwc->delayed_status = false;
		usb_gadget_set_state(&dwc->gadget, USB_STATE_CONFIGURED);

		if (dwc->ep0state == EP0_STATUS_PHASE)
			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
		else
			dev_dbg(dwc->dev, "too early for delayed status\n");

		return 0;
	}

	/*
	 * Unfortunately we have uncovered a limitation wrt the Data Phase.
	 *
	 * Section 9.4 says we can wait for the XferNotReady(DATA) event to
	 * come before issuing the Start Transfer command, but if we do, we will
	 * miss situations where the host starts another SETUP phase instead of
	 * the DATA phase.  Such cases happen at least on TD.7.6 of the Link
	 * Layer Compliance Suite.
	 *
	 * The problem surfaces due to the fact that in case of back-to-back
	 * SETUP packets there will be no XferNotReady(DATA) generated and we
	 * will be stuck waiting for XferNotReady(DATA) forever.
	 *
	 * By looking at tables 9-13 and 9-14 of the Databook, we can see that
	 * it tells us to start Data Phase right away. It also mentions that if
	 * we receive a SETUP phase instead of the DATA phase, the core will issue
	 * XferComplete for the DATA phase, before actually initiating it in
	 * the wire, with the TRB's status set to "SETUP_PENDING". Such status
	 * can only be used to print some debugging logs, as the core expects
	 * us to go through to the STATUS phase and start a CONTROL_STATUS TRB,
	 * just so it completes right away, without transferring anything and,
	 * only then, we can go back to the SETUP phase.
	 *
	 * Because of this scenario, SNPS decided to change the programming
	 * model of control transfers and support on-demand transfers only for
	 * the STATUS phase. To fix the issue we have now, we will always wait
	 * for gadget driver to queue the DATA phase's struct usb_request, then
	 * start it right away.
	 *
	 * If we're actually in a 2-stage transfer, we will wait for
	 * XferNotReady(STATUS).
	 */
	if (dwc->three_stage_setup) {
		unsigned        direction;

		direction = dwc->ep0_expect_in;
		dwc->ep0state = EP0_DATA_PHASE;

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~DWC3_EP0_DIR_IN;
	}

	return 0;
}
Example #20
0
/**
 * intel_mid_i2s_wr_req - request a write to i2s peripheral
 * @drv_data : handle of corresponding ssp i2s (given by i2s_open function)
 * @source : source buffer from which the samples to write are taken
 * @len : number of samples to be written (160 samples only right now)
 * @param : private context parameter to give back to write callback
 *
 * Output parameters
 *      error : 0 means no error
 */
int intel_mid_i2s_wr_req(struct intel_mid_i2s_hdl *drv_data, u32 *source,
						size_t len, void *param)
{
	dma_addr_t ssdr_addr;
	struct dma_async_tx_descriptor *txdesc = NULL;
	struct dma_chan *txchan = drv_data->txchan;
	enum dma_ctrl_flags flag;
	dma_addr_t src;
	WARN(!drv_data, "Driver data=NULL\n");
	if (!drv_data)
		return -EFAULT;
	if (!txchan) {
		dev_WARN(&(drv_data->pdev->dev), "wr_req but no txchan\n");
		return -EINVAL;
	}
	if (!len) {
		dev_WARN(&drv_data->pdev->dev, "invalid len 0");
		return -EINVAL;
	}

	dev_dbg(&drv_data->pdev->dev,
			"I2S_WRITE() src=%p, len=%d, drv_data=%p",
							source, len, drv_data);

	src = dma_map_single(NULL, source, len, DMA_TO_DEVICE);
	if (!src) {
		dev_WARN(&drv_data->pdev->dev, "can't map DMA address %p",
								source);
		return -EFAULT;
	}
	drv_data->write_src = src;
	drv_data->write_len = len;
	/* get Data Read/Write address */
	ssdr_addr = (dma_addr_t)(u32)(drv_data->paddr + OFFSET_SSDR);
	set_SSCR1_reg((drv_data->ioaddr), TSRE);
	change_SSCR0_reg((drv_data->ioaddr), TIM,
			 ((drv_data->current_settings).tx_fifo_interrupt));
	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	txdesc = txchan->device->device_prep_dma_memcpy(
			txchan,		/* DMA Channel */
			ssdr_addr,	/* DAR */
			src,		/* SAR */
			len,		/* Data Length */
			flag);		/* Flag */
	if (!txdesc) {
		dev_WARN(&(drv_data->pdev->dev),
			"wr_req dma memcpy FAILED(src=%08x,len=%d,txchan=%p)\n",
			(unsigned int) src, len, txchan);
		return -1;
	}
	dev_dbg(&(drv_data->pdev->dev), "WR dma tx summit\n");
	/* Only 1 WRITE at a time allowed */
	if (test_and_set_bit(I2S_PORT_WRITE_BUSY, &drv_data->flags)) {
		dma_unmap_single(NULL, src, len, DMA_TO_DEVICE);
		dev_WARN(&drv_data->pdev->dev, "WR reject I2S_PORT WRITE_BUSY");
		return -EBUSY;
	}
	txdesc->callback = i2s_write_done;
	drv_data->write_param = param;
	txdesc->callback_param = drv_data;
	txdesc->tx_submit(txdesc);
	dev_dbg(&(drv_data->pdev->dev), "wr dma req programmed\n");
	return 0;
}
int mpu_probe(struct i2c_client *client, const struct i2c_device_id *devid)
{
	struct mpu_platform_data *pdata;
	struct mpu_private_data *mpu;
	struct mldl_cfg *mldl_cfg;
	int res = 0;
	int ii = 0;

	dev_info(&client->adapter->dev, "%s: %d\n", __func__, ii++);

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		res = -ENODEV;
		goto out_check_functionality_failed;
	}

	mpu = kzalloc(sizeof(struct mpu_private_data), GFP_KERNEL);
	if (!mpu) {
		res = -ENOMEM;
		goto out_alloc_data_failed;
	}
	mldl_cfg = &mpu->mldl_cfg;
	mldl_cfg->mpu_ram = &mpu->mpu_ram;
	mldl_cfg->mpu_gyro_cfg = &mpu->mpu_gyro_cfg;
	mldl_cfg->mpu_offsets = &mpu->mpu_offsets;
	mldl_cfg->mpu_chip_info = &mpu->mpu_chip_info;
	mldl_cfg->inv_mpu_cfg = &mpu->inv_mpu_cfg;
	mldl_cfg->inv_mpu_state = &mpu->inv_mpu_state;

	mldl_cfg->mpu_ram->length = MPU_MEM_NUM_RAM_BANKS * MPU_MEM_BANK_SIZE;
	mldl_cfg->mpu_ram->ram = kzalloc(mldl_cfg->mpu_ram->length, GFP_KERNEL);
	if (!mldl_cfg->mpu_ram->ram) {
		res = -ENOMEM;
		goto out_alloc_ram_failed;
	}
	mpu_private_data = mpu;
	i2c_set_clientdata(client, mpu);
	mpu->client = client;

	init_waitqueue_head(&mpu->mpu_event_wait);
	mutex_init(&mpu->mutex);
	init_completion(&mpu->completion);

	mpu->response_timeout = 60;	/* Seconds */
	mpu->timeout.function = mpu_pm_timeout;
	mpu->timeout.data = (u_long) mpu;
	init_timer(&mpu->timeout);

	pdata = (struct mpu_platform_data *)client->dev.platform_data;
	if (!pdata) {
		dev_WARN(&client->adapter->dev,
			 "Missing platform data for mpu\n");
	} else {
		mldl_cfg->pdata = pdata;

		if (pdata && pdata->setup && pdata->hw_config) {
			pdata->hw_config(&client->dev, 1);
			/* by Spec */
			usleep_range(4000, 5000);
			res = pdata->setup(&client->dev, 1);
			if (res)
				goto out_alloc_ram_failed;
		}
	}

	mldl_cfg->mpu_chip_info->addr = client->addr;
	res = inv_mpu_open(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL);

	if (res) {
		dev_err(&client->adapter->dev,
			"Unable to open %s %d\n", MPU_NAME, res);
		res = -ENODEV;
		goto out_whoami_failed;
	}

	mpu->dev.minor = MISC_DYNAMIC_MINOR;
	mpu->dev.name = "mpu";
	mpu->dev.fops = &mpu_fops;
	res = misc_register(&mpu->dev);
	if (res < 0) {
		dev_err(&client->adapter->dev,
			"ERROR: misc_register returned %d\n", res);
		goto out_misc_register_failed;
	}

	if (client->irq) {
		dev_info(&client->adapter->dev,
			 "Installing irq using %d\n", client->irq);
		res = mpuirq_init(client, mldl_cfg);
		if (res)
			goto out_mpuirq_failed;
	} else {
		dev_WARN(&client->adapter->dev,
			 "Missing %s IRQ\n", MPU_NAME);
	}
	if (!strncmp(mpu_id[1].name, devid->name, sizeof(devid->name))) {
		/* Special case to re-use the inv_mpu_register_slave */
		struct ext_slave_platform_data *slave_pdata;
		slave_pdata = kzalloc(sizeof(*slave_pdata), GFP_KERNEL);
		if (!slave_pdata) {
			res = -ENOMEM;
			goto out_slave_pdata_kzalloc_failed;
		}
		slave_pdata->bus = EXT_SLAVE_BUS_PRIMARY;
		for (ii = 0; ii < 9; ii++)
			slave_pdata->orientation[ii] = pdata->orientation[ii];
		res = inv_mpu_register_slave(
			NULL, client,
			slave_pdata,
			mpu6050_get_slave_descr);
		if (res) {
			/* if inv_mpu_register_slave fails there are no pointer
			   references to the memory allocated to slave_pdata */
			kfree(slave_pdata);
			goto out_slave_pdata_kzalloc_failed;
		}
	}
	return res;

out_slave_pdata_kzalloc_failed:
	if (client->irq)
		mpuirq_exit();
out_mpuirq_failed:
	misc_deregister(&mpu->dev);
out_misc_register_failed:
	inv_mpu_close(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL);
out_whoami_failed:
	unregister_pm_notifier(&mpu->nb);
	kfree(mldl_cfg->mpu_ram->ram);
	mpu_private_data = NULL;
out_alloc_ram_failed:
	kfree(mpu);
out_alloc_data_failed:
out_check_functionality_failed:
	dev_err(&client->adapter->dev, "%s failed %d\n", __func__, res);
	return res;

}
static long mpu_dev_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mpu_platform_data *pdata;
	struct mpu_private_data *mpu =
	    container_of(file->private_data, struct mpu_private_data, dev);
	struct i2c_client *client = mpu->client;
	struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
	int retval = 0;
	struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
	struct ext_slave_descr **slave = mldl_cfg->slave;
	struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
	int ii;
	int res = 0;

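	/*
	 * Resolve the I2C adapter for each registered external slave; the
	 * gyroscope always lives on the MPU client's own adapter.
	 */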
	for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
		if (!pdata_slave[ii])
			slave_adapter[ii] = NULL;
		else
			slave_adapter[ii] =
				i2c_get_adapter(pdata_slave[ii]->adapt_num);
	}
	slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter;

	retval = mutex_lock_interruptible(&mpu->mutex);
	if (retval) {
		dev_err(&client->adapter->dev,
			"%s: mutex_lock_interruptible returned %d\n",
			__func__, retval);
		return retval;
	}

	switch (cmd) {
	case MPU_GET_EXT_SLAVE_PLATFORM_DATA:
		retval = mpu_dev_ioctl_get_ext_slave_platform_data(
			client,
			(struct ext_slave_platform_data __user *)arg);
		break;
	case MPU_GET_MPU_PLATFORM_DATA:
		retval = mpu_dev_ioctl_get_mpu_platform_data(
			client,
			(struct mpu_platform_data __user *)arg);
		break;
	case MPU_GET_EXT_SLAVE_DESCR:
		retval = mpu_dev_ioctl_get_ext_slave_descr(
			client,
			(struct ext_slave_descr __user *)arg);
		break;
	case MPU_READ:
	case MPU_WRITE:
	case MPU_READ_MEM:
	case MPU_WRITE_MEM:
	case MPU_READ_FIFO:
	case MPU_WRITE_FIFO:
		retval = mpu_handle_mlsl(
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			mldl_cfg->mpu_chip_info->addr, cmd,
			(struct mpu_read_write __user *)arg);
		break;
	case MPU_CONFIG_GYRO:
		retval = inv_mpu_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_CONFIG_ACCEL:
		retval = slave_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave[EXT_SLAVE_TYPE_ACCEL],
			pdata_slave[EXT_SLAVE_TYPE_ACCEL],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_CONFIG_COMPASS:
		retval = slave_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave[EXT_SLAVE_TYPE_COMPASS],
			pdata_slave[EXT_SLAVE_TYPE_COMPASS],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_CONFIG_PRESSURE:
		retval = slave_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			slave[EXT_SLAVE_TYPE_PRESSURE],
			pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_GET_CONFIG_GYRO:
		retval = inv_mpu_get_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_GET_CONFIG_ACCEL:
		retval = slave_get_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave[EXT_SLAVE_TYPE_ACCEL],
			pdata_slave[EXT_SLAVE_TYPE_ACCEL],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_GET_CONFIG_COMPASS:
		retval = slave_get_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave[EXT_SLAVE_TYPE_COMPASS],
			pdata_slave[EXT_SLAVE_TYPE_COMPASS],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_GET_CONFIG_PRESSURE:
		retval = slave_get_config(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			slave[EXT_SLAVE_TYPE_PRESSURE],
			pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
			(struct ext_slave_config __user *)arg);
		break;
	case MPU_SUSPEND:
		retval = inv_mpu_suspend(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			arg);
		pdata = (struct mpu_platform_data *)client->dev.platform_data;
		if (!pdata) {
			dev_WARN(&client->adapter->dev,
				 "Missing platform data for mpu\n");
		} else {
			if (pdata && pdata->setup && pdata->hw_config) {
				res = pdata->setup(&client->dev, 0);
				pdata->hw_config(&client->dev, 0);
			}
		}
		break;
	case MPU_RESUME:
		pdata = (struct mpu_platform_data *)client->dev.platform_data;
		if (!pdata) {
			dev_WARN(&client->adapter->dev,
				 "Missing platform data for mpu\n");
		} else {
			if (pdata && pdata->setup && pdata->hw_config) {
				pdata->hw_config(&client->dev, 1);
				res = pdata->setup(&client->dev, 1);
				if (res)
					pdata->hw_config(&client->dev, 0);
			}
		}
		retval = inv_mpu_resume(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			arg);
		break;
	case MPU_PM_EVENT_HANDLED:
		dev_dbg(&client->adapter->dev, "%s: %d\n", __func__, cmd);
		complete(&mpu->completion);
		break;
	case MPU_READ_ACCEL:
		retval = inv_slave_read(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_ACCEL],
			slave[EXT_SLAVE_TYPE_ACCEL],
			pdata_slave[EXT_SLAVE_TYPE_ACCEL],
			(unsigned char __user *)arg);
		break;
	case MPU_READ_COMPASS:
		retval = inv_slave_read(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_COMPASS],
			slave[EXT_SLAVE_TYPE_COMPASS],
			pdata_slave[EXT_SLAVE_TYPE_COMPASS],
			(unsigned char __user *)arg);
		break;
	case MPU_READ_PRESSURE:
		retval = inv_slave_read(
			mldl_cfg,
			slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			slave_adapter[EXT_SLAVE_TYPE_PRESSURE],
			slave[EXT_SLAVE_TYPE_PRESSURE],
			pdata_slave[EXT_SLAVE_TYPE_PRESSURE],
			(unsigned char __user *)arg);
		break;
	case MPU_GET_REQUESTED_SENSORS:
		if (copy_to_user(
			   (__u32 __user *)arg,
			   &mldl_cfg->inv_mpu_cfg->requested_sensors,
			   sizeof(mldl_cfg->inv_mpu_cfg->requested_sensors)))
			retval = -EFAULT;
		break;
	case MPU_SET_REQUESTED_SENSORS:
		mldl_cfg->inv_mpu_cfg->requested_sensors = arg;
		break;
	case MPU_GET_IGNORE_SYSTEM_SUSPEND:
		if (copy_to_user(
			(unsigned char __user *)arg,
			&mldl_cfg->inv_mpu_cfg->ignore_system_suspend,
			sizeof(mldl_cfg->inv_mpu_cfg->ignore_system_suspend)))
			retval = -EFAULT;
		break;
	case MPU_SET_IGNORE_SYSTEM_SUSPEND:
		mldl_cfg->inv_mpu_cfg->ignore_system_suspend = arg;
		break;
	case MPU_GET_MLDL_STATUS:
		if (copy_to_user(
			(unsigned char __user *)arg,
			&mldl_cfg->inv_mpu_state->status,
			sizeof(mldl_cfg->inv_mpu_state->status)))
			retval = -EFAULT;
		break;
	case MPU_GET_I2C_SLAVES_ENABLED:
		if (copy_to_user(
			(unsigned char __user *)arg,
			&mldl_cfg->inv_mpu_state->i2c_slaves_enabled,
			sizeof(mldl_cfg->inv_mpu_state->i2c_slaves_enabled)))
			retval = -EFAULT;
		break;
	default:
		dev_err(&client->adapter->dev,
			"%s: Unknown cmd %x, arg %lu\n",
			__func__, cmd, arg);
		retval = -EINVAL;
	}

	mutex_unlock(&mpu->mutex);
	dev_dbg(&client->adapter->dev, "%s: %08x, %08lx, %d\n",
		__func__, cmd, arg, retval);

	if (retval > 0)
		retval = -retval;

	return retval;
}
int inv_mpu_register_slave(struct module *slave_module,
			struct i2c_client *slave_client,
			struct ext_slave_platform_data *slave_pdata,
			struct ext_slave_descr *(*get_slave_descr)(void))
{
	struct mpu_private_data *mpu = mpu_private_data;
	struct mldl_cfg *mldl_cfg;
	struct ext_slave_descr *slave_descr;
	struct ext_slave_platform_data **pdata_slave;
	char *irq_name = NULL;
	int result = 0;

	if (!slave_client || !slave_pdata || !get_slave_descr)
		return -EINVAL;

	if (!mpu) {
		dev_err(&slave_client->adapter->dev,
			"%s: Null mpu_private_data\n", __func__);
		return -EINVAL;
	}
	mldl_cfg    = &mpu->mldl_cfg;
	pdata_slave = mldl_cfg->pdata_slave;
	slave_descr = get_slave_descr();

	if (!slave_descr) {
		dev_err(&slave_client->adapter->dev,
			"%s: Null ext_slave_descr\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&mpu->mutex);
	if (mpu->pid) {
		mutex_unlock(&mpu->mutex);
		return -EBUSY;
	}

	if (pdata_slave[slave_descr->type]) {
		result = -EBUSY;
		goto out_unlock_mutex;
	}

	slave_pdata->address	= slave_client->addr;
	slave_pdata->irq	= slave_client->irq;
	slave_pdata->adapt_num	= i2c_adapter_id(slave_client->adapter);

	dev_info(&slave_client->adapter->dev,
		"%s: +%s Type %d: Addr: %2x IRQ: %2d, Adapt: %2d\n",
		__func__,
		slave_descr->name,
		slave_descr->type,
		slave_pdata->address,
		slave_pdata->irq,
		slave_pdata->adapt_num);

	switch (slave_descr->type) {
	case EXT_SLAVE_TYPE_ACCEL:
		irq_name = "accelirq";
		break;
	case EXT_SLAVE_TYPE_COMPASS:
		irq_name = "compassirq";
		break;
	case EXT_SLAVE_TYPE_PRESSURE:
		irq_name = "pressureirq";
		break;
	default:
		irq_name = "none";
	}
	if (slave_descr->init) {
		result = slave_descr->init(slave_client->adapter,
					slave_descr,
					slave_pdata);
		if (result) {
			dev_err(&slave_client->adapter->dev,
				"%s init failed %d\n",
				slave_descr->name, result);
			goto out_unlock_mutex;
		}
	}

	if (slave_descr->type == EXT_SLAVE_TYPE_ACCEL &&
	    slave_descr->id == ACCEL_ID_MPU6050 &&
	    slave_descr->config) {
		/* pass a reference to the mldl_cfg data
		   structure to the mpu6050 accel "class" */
		struct ext_slave_config config;
		config.key = MPU_SLAVE_CONFIG_INTERNAL_REFERENCE;
		config.len = sizeof(struct mldl_cfg *);
		config.apply = true;
		config.data = mldl_cfg;
		result = slave_descr->config(
			slave_client->adapter, slave_descr,
			slave_pdata, &config);
		if (result) {
			LOG_RESULT_LOCATION(result);
			goto out_slavedescr_exit;
		}
	}
	pdata_slave[slave_descr->type] = slave_pdata;
	mpu->slave_modules[slave_descr->type] = slave_module;
	mldl_cfg->slave[slave_descr->type] = slave_descr;

	goto out_unlock_mutex;

out_slavedescr_exit:
	if (slave_descr->exit)
		slave_descr->exit(slave_client->adapter,
				  slave_descr, slave_pdata);
out_unlock_mutex:
	mutex_unlock(&mpu->mutex);

	if (!result && irq_name && (slave_pdata->irq > 0)) {
		int warn_result;
		dev_info(&slave_client->adapter->dev,
			"Installing %s irq using %d\n",
			irq_name,
			slave_pdata->irq);
		warn_result = slaveirq_init(slave_client->adapter,
					slave_pdata, irq_name);
		if (warn_result)
			dev_WARN(&slave_client->adapter->dev,
				"%s irq assigned error: %d\n",
				slave_descr->name, warn_result);
	} else {
		dev_WARN(&slave_client->adapter->dev,
			"%s irq not assigned: %d %d %d\n",
			slave_descr->name,
			result, (int)irq_name, slave_pdata->irq);
	}

	return result;
}
Example #24
0
int mpu_probe(struct i2c_client *client, const struct i2c_device_id *devid)
{
	struct mpu_platform_data *pdata;
	struct mpu_private_data *mpu;
	struct mldl_cfg *mldl_cfg;
	int res = 0;
	int ii = 0;

	dev_info(&client->adapter->dev, "%s: %d\n", __func__, ii++);

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		res = -ENODEV;
		goto out_check_functionality_failed;
	}

//[ECID:000000] ZTEBSP wanghaifei start 20120221, add sys file for sensor detect
	res = sysfs_create_group(&client->dev.kobj, &mpu3050_dev_attr_grp);
	if (res) {
		dev_err(&client->adapter->dev, "mpu3050 create sys file failed\n");
		goto out_create_sysfs_grp;
	}
//[ECID:000000] ZTEBSP wanghaifei end 20120221, add sys file for sensor detect

	mpu = kzalloc(sizeof(struct mpu_private_data), GFP_KERNEL);
	if (!mpu) {
		res = -ENOMEM;
		goto out_alloc_data_failed;
	}
	mldl_cfg = &mpu->mldl_cfg;
	mldl_cfg->mpu_ram	= &mpu->mpu_ram;
	mldl_cfg->mpu_gyro_cfg	= &mpu->mpu_gyro_cfg;
	mldl_cfg->mpu_offsets	= &mpu->mpu_offsets;
	mldl_cfg->mpu_chip_info	= &mpu->mpu_chip_info;
	mldl_cfg->inv_mpu_cfg	= &mpu->inv_mpu_cfg;
	mldl_cfg->inv_mpu_state	= &mpu->inv_mpu_state;

	mldl_cfg->mpu_ram->length = MPU_MEM_NUM_RAM_BANKS * MPU_MEM_BANK_SIZE;
	mldl_cfg->mpu_ram->ram = kzalloc(mldl_cfg->mpu_ram->length, GFP_KERNEL);
	if (!mldl_cfg->mpu_ram->ram) {
		res = -ENOMEM;
		goto out_alloc_ram_failed;
	}
	mpu_private_data = mpu;
	i2c_set_clientdata(client, mpu);
	mpu->client = client;

	init_waitqueue_head(&mpu->mpu_event_wait);
	mutex_init(&mpu->mutex);
	init_completion(&mpu->completion);

	mpu->response_timeout = 60;	/* Seconds */
	mpu->timeout.function = mpu_pm_timeout;
	mpu->timeout.data = (u_long) mpu;
	init_timer(&mpu->timeout);

	mpu->nb.notifier_call = mpu_pm_notifier_callback;
	mpu->nb.priority = 0;
	res = register_pm_notifier(&mpu->nb);
	if (res) {
		dev_err(&client->adapter->dev,
			"Unable to register pm_notifier %d\n", res);
		goto out_register_pm_notifier_failed;
	}

	pdata = (struct mpu_platform_data *)client->dev.platform_data;
	if (!pdata) {
		dev_WARN(&client->adapter->dev,
			 "Missing platform data for mpu\n");
	}
	mldl_cfg->pdata = pdata;

	mldl_cfg->mpu_chip_info->addr = client->addr;
	res = inv_mpu_open(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL);

	if (res) {
		dev_err(&client->adapter->dev,
			"Unable to open %s %d\n", MPU_NAME, res);
		res = -ENODEV;
		goto out_whoami_failed;
	}

	mpu->dev.minor = MISC_DYNAMIC_MINOR;
	mpu->dev.name = "mpu";
	mpu->dev.fops = &mpu_fops;
	res = misc_register(&mpu->dev);
	if (res < 0) {
		dev_err(&client->adapter->dev,
			"ERROR: misc_register returned %d\n", res);
		goto out_misc_register_failed;
	}

	if (client->irq) {
		dev_info(&client->adapter->dev,
			 "Installing irq using %d\n", client->irq);
		res = mpuirq_init(client, mldl_cfg);
		if (res)
			goto out_mpuirq_failed;
	} else {
		dev_WARN(&client->adapter->dev,
			 "Missing %s IRQ\n", MPU_NAME);
	}
	return res;

out_mpuirq_failed:
	misc_deregister(&mpu->dev);
out_misc_register_failed:
	inv_mpu_close(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL);
out_whoami_failed:
	unregister_pm_notifier(&mpu->nb);
out_register_pm_notifier_failed:
	kfree(mldl_cfg->mpu_ram->ram);
	mpu_private_data = NULL;
out_alloc_ram_failed:
	kfree(mpu);
out_alloc_data_failed:
//[ECID:000000] ZTEBSP wanghaifei start 20120221, don't remove sys file if failed
out_create_sysfs_grp:
//[ECID:000000] ZTEBSP wanghaifei end 20120221, don't remove sys file if failed
out_check_functionality_failed:
	dev_err(&client->adapter->dev, "%s failed %d\n", __func__, res);
	return res;

}
static int msm_hsphy_set_suspend(struct usb_phy *uphy, int suspend)
{
	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
	bool host = uphy->flags & PHY_HOST_MODE;
	bool chg_connected = uphy->flags & PHY_CHARGER_CONNECTED;
	int i, count;

	if (!!suspend == phy->suspended) {
		dev_dbg(uphy->dev, "%s\n", suspend ? "already suspended"
						   : "already resumed");
		return 0;
	}

	if (suspend) {
		for (i = 0; i < phy->num_ports; i++) {
			/* Clear interrupt latch register */
			writel_relaxed(ALT_INTERRUPT_MASK,
				phy->base + HS_PHY_IRQ_STAT_REG(i));

			/* Enable DP and DM HV interrupts */
			if (phy->core_ver >= MSM_CORE_VER_120)
				msm_usb_write_readback(phy->base,
						ALT_INTERRUPT_EN_REG(i),
						(LINESTATE_INTEN |
						DPINTEN | DMINTEN),
						(LINESTATE_INTEN |
						DPINTEN | DMINTEN));
			else
				msm_usb_write_readback(phy->base,
						ALT_INTERRUPT_EN_REG(i),
						DPDMHV_INT_MASK,
						DPDMHV_INT_MASK);
			if (!host) {
				/* set the following:
				 * OTGDISABLE0=1
				 * USB2_SUSPEND_N_SEL=1, USB2_SUSPEND_N=0
				 */
				if (phy->core_ver >= MSM_CORE_VER_120)
					msm_usb_write_readback(phy->base,
							HS_PHY_CTRL_COMMON_REG,
							COMMON_OTGDISABLE0,
							COMMON_OTGDISABLE0);
				else
					msm_usb_write_readback(phy->base,
						HS_PHY_CTRL_REG(i),
						OTGDISABLE0, OTGDISABLE0);

				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(i),
					(USB2_SUSPEND_N_SEL | USB2_SUSPEND_N),
					USB2_SUSPEND_N_SEL);
			}

			if (!phy->ext_vbus_id)
				/* Enable PHY-based IDHV and
				 * OTGSESSVLD HV interrupts
				 */
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(i),
					(OTGSESSVLDHV_INTEN | IDHV_INTEN),
					(OTGSESSVLDHV_INTEN | IDHV_INTEN));
		}

		/* Enable PHY retention */
		if (!host && !chg_connected) {
			if (phy->core_ver == MSM_CORE_VER_120 &&
					phy->set_pllbtune)
				/*
				 * On this particular revision the PLLITUNE[1]
				 * bit acts as the control for the RETENABLEN
				 * PHY signal.
				 */
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_COMMON_REG,
					COMMON_PLLITUNE_1, COMMON_PLLITUNE_1);
			else if (phy->core_ver >= MSM_CORE_VER_120)
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_COMMON_REG,
					COMMON_RETENABLEN, 0);
			else
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(0),
					RETENABLEN, 0);

			if (phy->csr) {
				/* switch PHY control to USB2PHY CSRs */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_CFG0,
						USB2PHY_OVERRIDE_EN,
						USB2PHY_OVERRIDE_EN);
				/* clear suspend_n */
				msm_usb_write_readback(phy->csr,
						USB2PHY_HS_PHY_CTRL2,
						USB2PHY_SUSPEND_N_SEL |
						USB2PHY_SUSPEND_N,
						USB2PHY_SUSPEND_N_SEL);
				/* enable retention */
				msm_usb_write_readback(phy->csr,
						USB2PHY_HS_PHY_CTRL_COMMON0,
						USB2PHY_COMMONONN |
						USB2PHY_RETENABLEN, 0);
				/* disable internal ref clock buffer */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_REFCLK_CTRL,
						REFCLK_RXTAP_EN, 0);
				/* power down PHY */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_PWRDOWN_CTRL,
						PWRDN_B, 0);
			}

			phy->lpm_flags |= PHY_RETENTIONED;
		}

		/* can turn off regulators if disconnected in device mode */
		if (phy->lpm_flags & PHY_RETENTIONED && !phy->cable_connected) {
			if (phy->ext_vbus_id) {
				msm_hsusb_ldo_enable(phy, 0);
				phy->lpm_flags |= PHY_PWR_COLLAPSED;
			}
			msm_hsusb_config_vdd(phy, 0);
		}

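		/* The global active-PHY count must never drop below zero. */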
		count = atomic_dec_return(&hsphy_active_count);
		if (count < 0) {
			dev_WARN(uphy->dev, "hsphy_active_count=%d, something wrong?\n",
					count);
			atomic_set(&hsphy_active_count, 0);
		}
	} else {
		atomic_inc(&hsphy_active_count);
		if (phy->lpm_flags & PHY_RETENTIONED && !phy->cable_connected) {
			msm_hsusb_config_vdd(phy, 1);
			if (phy->ext_vbus_id) {
				msm_hsusb_ldo_enable(phy, 1);
				phy->lpm_flags &= ~PHY_PWR_COLLAPSED;
			}

			if (phy->csr) {
				/* power on PHY */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_PWRDOWN_CTRL,
						PWRDN_B, PWRDN_B);
				/* enable internal ref clock buffer */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_REFCLK_CTRL,
						REFCLK_RXTAP_EN,
						REFCLK_RXTAP_EN);
				/* disable retention */
				msm_usb_write_readback(phy->csr,
						USB2PHY_HS_PHY_CTRL_COMMON0,
						USB2PHY_COMMONONN |
						USB2PHY_RETENABLEN,
						USB2PHY_COMMONONN |
						USB2PHY_RETENABLEN);
				/* switch suspend_n_sel back to HW */
				msm_usb_write_readback(phy->csr,
						USB2PHY_HS_PHY_CTRL2,
						USB2PHY_SUSPEND_N_SEL |
						USB2PHY_SUSPEND_N, 0);
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_CFG0,
						USB2PHY_OVERRIDE_EN, 0);
			}

			/* Disable PHY retention */
			if (phy->core_ver == MSM_CORE_VER_120 &&
					phy->set_pllbtune)
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_COMMON_REG,
					COMMON_PLLITUNE_1, 0);
			else if (phy->core_ver >= MSM_CORE_VER_120)
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_COMMON_REG,
					COMMON_RETENABLEN, COMMON_RETENABLEN);
			else
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(0),
					RETENABLEN, RETENABLEN);
			phy->lpm_flags &= ~PHY_RETENTIONED;
		}

		if (phy->core_ver >= MSM_CORE_VER_120) {
			if (phy->set_pllbtune) {
				msm_usb_write_readback(phy->base,
						HS_PHY_CTRL_COMMON_REG,
						FSEL_MASK, 0);
			} else {
				msm_usb_write_readback(phy->base,
						HS_PHY_CTRL_COMMON_REG,
						FSEL_MASK, FSEL_DEFAULT);
			}
		}

		for (i = 0; i < phy->num_ports; i++) {
			if (!phy->ext_vbus_id)
				/* Disable HV interrupts */
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(i),
					(OTGSESSVLDHV_INTEN | IDHV_INTEN),
					0);

			/* Clear interrupt latch register */
			writel_relaxed(ALT_INTERRUPT_MASK,
				phy->base + HS_PHY_IRQ_STAT_REG(i));
			/* Disable DP and DM HV interrupt */
			if (phy->core_ver >= MSM_CORE_VER_120)
				msm_usb_write_readback(phy->base,
						ALT_INTERRUPT_EN_REG(i),
						LINESTATE_INTEN, 0);
			else
				msm_usb_write_readback(phy->base,
						ALT_INTERRUPT_EN_REG(i),
						DPDMHV_INT_MASK, 0);
			if (!host) {
				/* Bring PHY out of suspend */
				msm_usb_write_readback(phy->base,
						HS_PHY_CTRL_REG(i),
						USB2_SUSPEND_N_SEL, 0);

				if (phy->core_ver >= MSM_CORE_VER_120)
					msm_usb_write_readback(phy->base,
							HS_PHY_CTRL_COMMON_REG,
							COMMON_OTGDISABLE0,
							0);
				else
					msm_usb_write_readback(phy->base,
							HS_PHY_CTRL_REG(i),
							OTGDISABLE0, 0);
			}
		}
		/*
		 * write HSPHY init value to QSCRATCH reg to set HSPHY
		 * parameters like VBUS valid threshold, disconnect valid
		 * threshold, DC voltage level, pre-emphasis and rise/fall time
		 */
		dev_dbg(uphy->dev, "%s set params\n", __func__);
		msm_hsphy_set_params(uphy);
	}

	phy->suspended = !!suspend; /* double-NOT coerces to bool value */
	return 0;
}
Example #26
0
static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
{
	struct ucsi *ucsi = data;
	struct ucsi_cci *cci;

	spin_lock(&ucsi->dev_lock);

	ucsi->status = UCSI_IDLE;
	cci = &ucsi->data->cci;

	/*
	 * REVISIT: This is not documented behavior, but all known PPMs ACK
	 * asynchronous events by sending notification with cleared CCI.
	 */
	if (!ucsi->data->raw_cci) {
		if (test_bit(EVENT_PENDING, &ucsi->flags))
			complete(&ucsi->complete);
		else
			dev_WARN(ucsi->dev, "spurious notification\n");
		goto out_unlock;
	}

	if (test_bit(COMMAND_PENDING, &ucsi->flags)) {
		if (cci->busy) {
			ucsi->status = UCSI_BUSY;
			complete(&ucsi->complete);

			goto out_unlock;
		} else if (cci->ack_complete || cci->cmd_complete) {
			/* Error Indication is only valid with commands */
			if (cci->error && cci->cmd_complete)
				ucsi->status = UCSI_ERROR;

			ucsi->data->ctrl.raw_cmd = 0;
			complete(&ucsi->complete);
		}
	}

	if (cci->connector_change) {
		struct ucsi_connector *con;

		/*
	 * This is a workaround for buggy PPMs that create asynchronous
		 * event notifications before OPM has enabled them.
		 */
		if (!ucsi->connector)
			goto out_unlock;

		con = ucsi->connector + (cci->connector_change - 1);

		/*
		 * PPM will not clear the connector specific bit in Connector
		 * Change Indication field of CCI until the driver has ACKed
		 * it, and the driver cannot ACK it before it has been
		 * processed. The PPM will not generate new events before the
		 * first has been acknowledged, even if they are for another
		 * connector. So only one event at a time.
		 */
		if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
			schedule_work(&con->work);
	}
out_unlock:
	spin_unlock(&ucsi->dev_lock);
}
Example #27
0
/**
 * intel_mid_i2s_rd_req - request a read from i2s peripheral
 * @drv_data : handle of corresponding ssp i2s (given by i2s_open function)
 * @destination : destination buffer where the read samples should be put
 * @len : number of samples to be read (160 samples only right now)
 * @param : private context parameter to give back to read callback
 *
 * Output parameters
 *      error : 0 means no error
 */
int intel_mid_i2s_rd_req(struct intel_mid_i2s_hdl *drv_data,
				u32 *destination, size_t len, void *param)
{
	struct dma_async_tx_descriptor *rxdesc = NULL;
	struct dma_chan *rxchan = drv_data->rxchan;
	enum dma_ctrl_flags flag;
	dma_addr_t ssdr_addr;
	dma_addr_t dst;
	WARN(!drv_data, "Driver data=NULL\n");
	if (!drv_data)
		return -EFAULT;
	if (!rxchan) {
		dev_WARN(&(drv_data->pdev->dev), "rd_req FAILED no rxchan\n");
		return -EINVAL;
	}
	if (!len) {
		dev_WARN(&drv_data->pdev->dev, "rd req invalid len=0");
		return -EINVAL;
	}

	dev_dbg(&drv_data->pdev->dev, "I2S_READ() dst=%p, len=%d, drv_data=%p",
						destination, len, drv_data);
	dst = dma_map_single(NULL, destination, len, DMA_FROM_DEVICE);
	if (!dst) {
		dev_WARN(&drv_data->pdev->dev, "can't map DMA address %p",
								destination);
		return -ENOMEM;
	}

	drv_data->read_dst = dst;
	drv_data->read_len = len;
	/* get Data Read/Write address */
	ssdr_addr = (drv_data->paddr + OFFSET_SSDR);
	set_SSCR1_reg((drv_data->ioaddr), RSRE);
	change_SSCR0_reg((drv_data->ioaddr), RIM,
			 ((drv_data->current_settings).rx_fifo_interrupt));
	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	/* Start the RX dma transfer */
	rxdesc = rxchan->device->device_prep_dma_memcpy(
			rxchan,		/* DMA Channel */
			dst,		/* DAR */
			ssdr_addr,	/* SAR */
			len,		/* Data Length */
			flag);		/* Flag */
	if (!rxdesc) {
		dev_WARN(&drv_data->pdev->dev, "can not prep dma memcpy");
		return -EFAULT;
	}
	/* Only 1 READ at a time allowed. do it at end to avoid clear&wakeup*/
	if (test_and_set_bit(I2S_PORT_READ_BUSY, &drv_data->flags)) {
		dma_unmap_single(NULL, dst, len, DMA_FROM_DEVICE);
		dev_WARN(&drv_data->pdev->dev, "RD reject I2S_PORT READ_BUSY");
		return -EBUSY;
	}
	dev_dbg(&(drv_data->pdev->dev), "RD dma tx submit\n");
	rxdesc->callback = i2s_read_done;
	drv_data->read_param = param;
	rxdesc->callback_param = drv_data;
	rxdesc->tx_submit(rxdesc);
	return 0;
}