int bcm2835_audio_set_params(bcm2835_alsa_stream_t * alsa_stream,
			     uint32_t channels, uint32_t samplerate,
			     uint32_t bps)
{
	VC_AUDIO_MSG_T m;
	AUDIO_INSTANCE_T *instance = alsa_stream->instance;
	int32_t success;
	int ret;
	LOG_DBG(" .. IN\n");

	LOG_INFO(" Setting ALSA channels(%d), samplerate(%d), bits-per-sample(%d)\n",
		 channels, samplerate, bps);

	/* resend ctls - alsa_stream may not have been open when they were first sent */
	ret = bcm2835_audio_set_ctls_chan(alsa_stream, alsa_stream->chip);
	if (ret != 0) {
		LOG_ERR(" Alsa controls not supported\n");
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&instance->vchi_mutex)) {
		LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
			instance->num_connections);
		return -EINTR;
	}
	vchi_service_use(instance->vchi_handle[0]);

	instance->result = -1;

	m.type = VC_AUDIO_MSG_TYPE_CONFIG;
	m.u.config.channels = channels;
	m.u.config.samplerate = samplerate;
	m.u.config.bps = bps;

	/* Create the message available completion */
	init_completion(&instance->msg_avail_comp);

	/* Send the message to the videocore */
	success = vchi_msg_queue(instance->vchi_handle[0],
				 &m, sizeof m,
				 VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);

	if (success != 0) {
		LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
			__func__, success);

		ret = -1;
		goto unlock;
	}

	/* We are expecting a reply from the videocore */
	ret = wait_for_completion_interruptible(&instance->msg_avail_comp);
	if (ret) {
		LOG_ERR("%s: failed on waiting for event (status=%d)\n",
			__func__, ret);
		goto unlock;
	}

	if (instance->result != 0) {
		LOG_ERR("%s: result=%d", __func__, instance->result);

		ret = -1;
		goto unlock;
	}

	ret = 0;

unlock:
	vchi_service_release(instance->vchi_handle[0]);
	mutex_unlock(&instance->vchi_mutex);

	LOG_DBG(" .. OUT\n");
	return ret;
}
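Example #2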
static int __init smd_tty_init(void)
{
	int ret;
	int n;
	int idx;

	smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
	if (smd_tty_driver == NULL)
		return -ENOMEM;

	smd_tty_driver->owner = THIS_MODULE;
	smd_tty_driver->driver_name = "smd_tty_driver";
	smd_tty_driver->name = "smd";
	smd_tty_driver->major = 0;
	smd_tty_driver->minor_start = 0;
	smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	smd_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	smd_tty_driver->init_termios = tty_std_termios;
	smd_tty_driver->init_termios.c_iflag = 0;
	smd_tty_driver->init_termios.c_oflag = 0;
	smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
	smd_tty_driver->init_termios.c_lflag = 0;
	smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS |
		TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	tty_set_operations(smd_tty_driver, &smd_tty_ops);

	ret = tty_register_driver(smd_tty_driver);
	if (ret) {
		put_tty_driver(smd_tty_driver);
		pr_err("%s: driver registration failed %d\n", __func__, ret);
		return ret;
	}

	for (n = 0; n < ARRAY_SIZE(smd_configs); ++n) {
		idx = smd_configs[n].tty_dev_index;

		if (smd_configs[n].dev_name == NULL)
			smd_configs[n].dev_name = smd_configs[n].port_name;
#if 0
		if (idx == DS_IDX) {
			/*
			 * DS port uses the kernel API starting with
			 * 8660 Fusion.  Only register the userspace
			 * platform device for older targets.
			 */
			int legacy_ds = 0;

			legacy_ds |= cpu_is_msm7x01() || cpu_is_msm7x25();
			legacy_ds |= cpu_is_msm7x27() || cpu_is_msm7x30();
			legacy_ds |= cpu_is_qsd8x50() || cpu_is_msm8x55();
			/*
			 * use legacy mode for 8660 Standalone (subtype 0)
			 */
			legacy_ds |= cpu_is_msm8x60() &&
					(socinfo_get_platform_subtype() == 0x0);

			if (!legacy_ds)
				continue;
		}
#endif
		tty_register_device(smd_tty_driver, idx, 0);
		init_completion(&smd_tty[idx].ch_allocated);

		/* register platform device */
		smd_tty[idx].driver.probe = smd_tty_dummy_probe;
		smd_tty[idx].driver.driver.name = smd_configs[n].dev_name;
		smd_tty[idx].driver.driver.owner = THIS_MODULE;
		spin_lock_init(&smd_tty[idx].reset_lock);
		smd_tty[idx].is_open = 0;
		setup_timer(&smd_tty[idx].buf_req_timer, buf_req_retry,
				(unsigned long)&smd_tty[idx]);
		init_waitqueue_head(&smd_tty[idx].ch_opened_wait_queue);
		ret = platform_driver_register(&smd_tty[idx].driver);

		if (ret) {
			pr_err("%s: init failed %d (%d)\n", __func__, idx, ret);
			smd_tty[idx].driver.probe = NULL;
			goto out;
		}
		smd_tty[idx].smd = &smd_configs[n];
	}
	INIT_DELAYED_WORK(&loopback_work, loopback_probe_worker);
	return 0;

out:
	/* unregister platform devices */
	for (n = 0; n < ARRAY_SIZE(smd_configs); ++n) {
		idx = smd_configs[n].tty_dev_index;

		if (smd_tty[idx].driver.probe) {
			platform_driver_unregister(&smd_tty[idx].driver);
			tty_unregister_device(smd_tty_driver, idx);
		}
	}

	tty_unregister_driver(smd_tty_driver);
	put_tty_driver(smd_tty_driver);
	return ret;
}
Example #3
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
    struct omap_onenand_platform_data *pdata;
    struct omap2_onenand *c;
    struct onenand_chip *this;
    int r;

    pdata = pdev->dev.platform_data;
    if (pdata == NULL) {
        dev_err(&pdev->dev, "platform data missing\n");
        return -ENODEV;
    }

    c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
    if (!c)
        return -ENOMEM;

    init_completion(&c->irq_done);
    init_completion(&c->dma_done);
    c->gpmc_cs = pdata->cs;
    c->gpio_irq = pdata->gpio_irq;
    c->dma_channel = pdata->dma_channel;
    if (c->dma_channel < 0) {
        /* if -1, don't use DMA */
        c->gpio_irq = 0;
    }

    r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
    if (r < 0) {
        dev_err(&pdev->dev, "Cannot request GPMC CS\n");
        goto err_kfree;
    }

    if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
                           pdev->dev.driver->name) == NULL) {
        dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
                "size: 0x%x\n",	c->phys_base, ONENAND_IO_SIZE);
        r = -EBUSY;
        goto err_free_cs;
    }
    c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
    if (c->onenand.base == NULL) {
        r = -ENOMEM;
        goto err_release_mem_region;
    }

    if (pdata->onenand_setup != NULL) {
        r = pdata->onenand_setup(c->onenand.base, &c->freq);
        if (r < 0) {
            dev_err(&pdev->dev, "Onenand platform setup failed: "
                    "%d\n", r);
            goto err_iounmap;
        }
        c->setup = pdata->onenand_setup;
    }

    if (c->gpio_irq) {
        if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
            dev_err(&pdev->dev,  "Failed to request GPIO%d for "
                    "OneNAND\n", c->gpio_irq);
            goto err_iounmap;
        }
        gpio_direction_input(c->gpio_irq);

        if ((r = request_irq(gpio_to_irq(c->gpio_irq),
                             omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
                             pdev->dev.driver->name, c)) < 0)
            goto err_release_gpio;
    }

    if (c->dma_channel >= 0) {
        r = omap_request_dma(0, pdev->dev.driver->name,
                             omap2_onenand_dma_cb, (void *) c,
                             &c->dma_channel);
        if (r == 0) {
            omap_set_dma_write_mode(c->dma_channel,
                                    OMAP_DMA_WRITE_NON_POSTED);
            omap_set_dma_src_data_pack(c->dma_channel, 1);
            omap_set_dma_src_burst_mode(c->dma_channel,
                                        OMAP_DMA_DATA_BURST_8);
            omap_set_dma_dest_data_pack(c->dma_channel, 1);
            omap_set_dma_dest_burst_mode(c->dma_channel,
                                         OMAP_DMA_DATA_BURST_8);
        } else {
            dev_info(&pdev->dev,
                     "failed to allocate DMA for OneNAND, "
                     "using PIO instead\n");
            c->dma_channel = -1;
        }
    }

    dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
             "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
             c->onenand.base, c->freq);

    c->pdev = pdev;
    c->mtd.name = dev_name(&pdev->dev);
    c->mtd.priv = &c->onenand;
    c->mtd.owner = THIS_MODULE;

    c->mtd.dev.parent = &pdev->dev;

    this = &c->onenand;
    if (c->dma_channel >= 0) {
        this->wait = omap2_onenand_wait;
        if (cpu_is_omap34xx()) {
            this->read_bufferram = omap3_onenand_read_bufferram;
            this->write_bufferram = omap3_onenand_write_bufferram;
        } else {
            this->read_bufferram = omap2_onenand_read_bufferram;
            this->write_bufferram = omap2_onenand_write_bufferram;
        }
    }

    if (pdata->regulator_can_sleep) {
        c->regulator = regulator_get(&pdev->dev, "vonenand");
        if (IS_ERR(c->regulator)) {
            dev_err(&pdev->dev,  "Failed to get regulator\n");
            r = PTR_ERR(c->regulator);
            goto err_release_dma;
        }
        c->onenand.enable = omap2_onenand_enable;
        c->onenand.disable = omap2_onenand_disable;
    }

    if (pdata->skip_initial_unlocking)
        this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;

    if ((r = onenand_scan(&c->mtd, 1)) < 0)
        goto err_release_regulator;

    r = mtd_device_parse_register(&c->mtd, NULL, 0,
                                  pdata ? pdata->parts : NULL,
                                  pdata ? pdata->nr_parts : 0);
    if (r)
        goto err_release_onenand;

    platform_set_drvdata(pdev, c);

    return 0;

err_release_onenand:
    onenand_release(&c->mtd);
err_release_regulator:
    regulator_put(c->regulator);
err_release_dma:
    if (c->dma_channel != -1)
        omap_free_dma(c->dma_channel);
    if (c->gpio_irq)
        free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
    if (c->gpio_irq)
        gpio_free(c->gpio_irq);
err_iounmap:
    iounmap(c->onenand.base);
err_release_mem_region:
    release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
    gpmc_cs_free(c->gpmc_cs);
err_kfree:
    kfree(c);

    return r;
}
Example #4
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
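do_fork() above shows the classic on-stack completion handshake: the waiter owns the completion on its own stack and another context signals it. Below is a stripped-down sketch of the same idiom, not taken from the kernel source; struct my_req and hand_off_to_producer() are hypothetical stand-ins for whatever mechanism passes the pointer to the other context.

#include <linux/completion.h>

struct my_req {
	struct completion done;
	int result;
};

/* hypothetical dispatch that hands req to the producer context */
void hand_off_to_producer(struct my_req *req);

static void producer(struct my_req *req)	/* runs in another context */
{
	req->result = 0;		/* ... do the actual work ... */
	complete(&req->done);		/* wake the waiting consumer */
}

static int consumer(void)
{
	struct my_req req;

	init_completion(&req.done);	/* initialize before publishing */
	hand_off_to_producer(&req);
	wait_for_completion(&req.done);	/* req can live on our stack: we only
					 * return after the producer is done */
	return req.result;
}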
Example #5
static int riic_i2c_probe(struct platform_device *pdev)
{
	struct riic_dev *riic;
	struct i2c_adapter *adap;
	struct resource *res;
	struct i2c_timings i2c_t;
	int i, ret;

	riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL);
	if (!riic)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	riic->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(riic->base))
		return PTR_ERR(riic->base);

	riic->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(riic->clk)) {
		dev_err(&pdev->dev, "missing controller clock");
		return PTR_ERR(riic->clk);
	}

	for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) {
		res = platform_get_resource(pdev, IORESOURCE_IRQ, riic_irqs[i].res_num);
		if (!res)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, res->start, riic_irqs[i].isr,
					0, riic_irqs[i].name, riic);
		if (ret) {
			dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name);
			return ret;
		}
	}

	adap = &riic->adapter;
	i2c_set_adapdata(adap, riic);
	strlcpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
	adap->owner = THIS_MODULE;
	adap->algo = &riic_algo;
	adap->dev.parent = &pdev->dev;
	adap->dev.of_node = pdev->dev.of_node;

	init_completion(&riic->msg_done);

	i2c_parse_fw_timings(&pdev->dev, &i2c_t, true);

	pm_runtime_enable(&pdev->dev);

	ret = riic_init_hw(riic, &i2c_t);
	if (ret)
		goto out;

	ret = i2c_add_adapter(adap);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, riic);

	dev_info(&pdev->dev, "registered with %dHz bus speed\n",
		 i2c_t.bus_freq_hz);
	return 0;

out:
	pm_runtime_disable(&pdev->dev);
	return ret;
}
Example #6
int rtl_usb_probe(struct usb_interface *intf,
		  const struct usb_device_id *id,
		  struct rtl_hal_cfg *rtl_hal_cfg)
{
	int err;
	struct ieee80211_hw *hw = NULL;
	struct rtl_priv *rtlpriv = NULL;
	struct usb_device	*udev;
	struct rtl_usb_priv *usb_priv;

	hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
				sizeof(struct rtl_usb_priv), &rtl_ops);
	if (!hw) {
		RT_ASSERT(false, "ieee80211 alloc failed\n");
		return -ENOMEM;
	}
	rtlpriv = hw->priv;
	rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
				    GFP_KERNEL);
	if (!rtlpriv->usb_data) {
		ieee80211_free_hw(hw);
		return -ENOMEM;
	}

	/* this spin lock must be initialized early */
	spin_lock_init(&rtlpriv->locks.usb_lock);
	INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
		  rtl_fill_h2c_cmd_work_callback);
	INIT_WORK(&rtlpriv->works.lps_change_work,
		  rtl_lps_change_work_callback);

	rtlpriv->usb_data_index = 0;
	init_completion(&rtlpriv->firmware_loading_complete);
	SET_IEEE80211_DEV(hw, &intf->dev);
	udev = interface_to_usbdev(intf);
	usb_get_dev(udev);
	usb_priv = rtl_usbpriv(hw);
	memset(usb_priv, 0, sizeof(*usb_priv));
	usb_priv->dev.intf = intf;
	usb_priv->dev.udev = udev;
	usb_set_intfdata(intf, hw);
	/* init cfg & intf_ops */
	rtlpriv->rtlhal.interface = INTF_USB;
	rtlpriv->cfg = rtl_hal_cfg;
	rtlpriv->intf_ops = &rtl_usb_ops;
	rtl_dbgp_flag_init(hw);
	/* Init IO handler */
	_rtl_usb_io_handler_init(&udev->dev, hw);
	rtlpriv->cfg->ops->read_chip_version(hw);
	/*like read eeprom and so on */
	rtlpriv->cfg->ops->read_eeprom_info(hw);
	err = _rtl_usb_init(hw);
	if (err)
		goto error_out;
	rtl_usb_init_sw(hw);
	/* Init mac80211 sw */
	err = rtl_init_core(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Can't allocate sw for mac80211\n");
		goto error_out;
	}
	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
		goto error_out;
	}
	rtlpriv->cfg->ops->init_sw_leds(hw);

	err = ieee80211_register_hw(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Can't register mac80211 hw.\n");
		err = -ENODEV;
		goto error_out;
	}
	rtlpriv->mac80211.mac80211_registered = 1;

	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
	return 0;

error_out:
	rtl_deinit_core(hw);
	_rtl_usb_io_handler_release(hw);
	usb_put_dev(udev);
	complete(&rtlpriv->firmware_loading_complete);
	return -ENODEV;
}
Example #7
static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
{
	struct s3c2410_spi_info *pdata;
	struct s3c24xx_spi *hw;
	struct spi_master *master;
	struct resource *res;
	int err = 0;

	master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
	if (master == NULL) {
		dev_err(&pdev->dev, "No memory for spi_master\n");
		err = -ENOMEM;
		goto err_nomem;
	}

	hw = spi_master_get_devdata(master);
	memset(hw, 0, sizeof(struct s3c24xx_spi));

	hw->master = spi_master_get(master);
	hw->pdata = pdata = pdev->dev.platform_data;
	hw->dev = &pdev->dev;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "No platform data supplied\n");
		err = -ENOENT;
		goto err_no_pdata;
	}

	platform_set_drvdata(pdev, hw);
	init_completion(&hw->done);

	/* initialise fiq handler */

	s3c24xx_spi_initfiq(hw);

	/* setup the master state. */

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->num_chipselect = hw->pdata->num_cs;
	master->bus_num = pdata->bus_num;

	/* setup the state for the bitbang driver */

	hw->bitbang.master         = hw->master;
	hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer;
	hw->bitbang.chipselect     = s3c24xx_spi_chipsel;
	hw->bitbang.txrx_bufs      = s3c24xx_spi_txrx;

	hw->master->setup  = s3c24xx_spi_setup;
	hw->master->cleanup = s3c24xx_spi_cleanup;

	dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);

	/* find and map our resources */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
		err = -ENOENT;
		goto err_no_iores;
	}

	hw->ioarea = request_mem_region(res->start, resource_size(res),
					pdev->name);

	if (hw->ioarea == NULL) {
		dev_err(&pdev->dev, "Cannot reserve region\n");
		err = -ENXIO;
		goto err_no_iores;
	}

	hw->regs = ioremap(res->start, resource_size(res));
	if (hw->regs == NULL) {
		dev_err(&pdev->dev, "Cannot map IO\n");
		err = -ENXIO;
		goto err_no_iomap;
	}

	hw->irq = platform_get_irq(pdev, 0);
	if (hw->irq < 0) {
		dev_err(&pdev->dev, "No IRQ specified\n");
		err = -ENOENT;
		goto err_no_irq;
	}

	err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw);
	if (err) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		goto err_no_irq;
	}

	hw->clk = clk_get(&pdev->dev, "spi");
	if (IS_ERR(hw->clk)) {
		dev_err(&pdev->dev, "No clock for device\n");
		err = PTR_ERR(hw->clk);
		goto err_no_clk;
	}

	/* setup any gpio we can */

	if (!pdata->set_cs) {
		if (pdata->pin_cs < 0) {
			dev_err(&pdev->dev, "No chipselect pin\n");
			err = -EINVAL;
			goto err_register;
		}

		err = gpio_request(pdata->pin_cs, dev_name(&pdev->dev));
		if (err) {
			dev_err(&pdev->dev, "Failed to get gpio for cs\n");
			goto err_register;
		}

		hw->set_cs = s3c24xx_spi_gpiocs;
		gpio_direction_output(pdata->pin_cs, 1);
	} else
		hw->set_cs = pdata->set_cs;

	s3c24xx_spi_initialsetup(hw);

	/* register our spi controller */

	err = spi_bitbang_start(&hw->bitbang);
	if (err) {
		dev_err(&pdev->dev, "Failed to register SPI master\n");
		goto err_register;
	}

	return 0;

 err_register:
	if (hw->set_cs == s3c24xx_spi_gpiocs)
		gpio_free(pdata->pin_cs);

	clk_disable(hw->clk);
	clk_put(hw->clk);

 err_no_clk:
	free_irq(hw->irq, hw);

 err_no_irq:
	iounmap(hw->regs);

 err_no_iomap:
	release_resource(hw->ioarea);
	kfree(hw->ioarea);

 err_no_iores:
 err_no_pdata:
	spi_master_put(hw->master);

 err_nomem:
	return err;
}
Example #8
/* This is the common part of the URB message submission code
 *
 * All URBs from the usb-storage driver involved in handling a queued scsi
 * command _must_ pass through this function (or something like it) for the
 * abort mechanisms to work properly.
 */
static int usb_stor_msg_common(struct us_data *us, int timeout)
{
	struct completion urb_done;
	long timeleft;
	int status;

	/* don't submit URBs during abort processing */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags))
		return -EIO;

	/* set up data structures for the wakeup system */
	init_completion(&urb_done);

	/* fill the common fields in the URB */
	us->current_urb->context = &urb_done;
	us->current_urb->transfer_flags = 0;

	/* we assume that if transfer_buffer isn't us->iobuf then it
	 * hasn't been mapped for DMA.  Yes, this is clunky, but it's
	 * easier than always having the caller tell us whether the
	 * transfer buffer has already been mapped. */
	if (us->current_urb->transfer_buffer == us->iobuf)
		us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	us->current_urb->transfer_dma = us->iobuf_dma;

	/* submit the URB */
	status = usb_submit_urb(us->current_urb, GFP_NOIO);
	if (status) {
		/* something went wrong */
		return status;
	}

	/* since the URB has been submitted successfully, it's now okay
	 * to cancel it */
	set_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

	/* did an abort occur during the submission? */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {

		/* cancel the URB, if it hasn't been cancelled already */
		if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
			US_DEBUGP("-- cancelling URB\n");
			usb_unlink_urb(us->current_urb);
		}
	}
#ifdef FEATURE_ANDROID_PANTECH_USB_OTG_MODE
	p_current_usb_us_data = us;
	is_urb_waiting = true;
#endif
	/* wait for the completion of the URB */
	timeleft = wait_for_completion_interruptible_timeout(
			&urb_done, timeout ? : MAX_SCHEDULE_TIMEOUT);
#ifdef FEATURE_ANDROID_PANTECH_USB_OTG_MODE
	is_urb_waiting = false;
	p_current_usb_us_data = NULL;
#endif
	clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

	if (timeleft <= 0) {
		US_DEBUGP("%s -- cancelling URB\n",
			  timeleft == 0 ? "Timeout" : "Signal");
		usb_kill_urb(us->current_urb);
	}

	/* return the URB status */
	return us->current_urb->status;
}
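The timed, interruptible wait above has three distinct outcomes. A condensed sketch of just that classification (not driver code; wait_and_classify() is a made-up helper name): a positive return from wait_for_completion_interruptible_timeout() is the remaining jiffies, zero means timeout, and a negative value (-ERESTARTSYS) means a signal interrupted the sleep.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

static int wait_and_classify(struct completion *done, unsigned long timeout)
{
	long timeleft = wait_for_completion_interruptible_timeout(done,
			timeout ? : MAX_SCHEDULE_TIMEOUT);

	if (timeleft > 0)
		return 0;		/* completed in time */
	if (timeleft == 0)
		return -ETIMEDOUT;	/* caller should cancel, e.g. usb_kill_urb() */
	return timeleft;		/* -ERESTARTSYS: signal, cancel as well */
}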
Example #9
static ssize_t cpcusb_write(struct file *file, const char *buffer,
			    size_t count, loff_t * ppos)
{
	CPC_USB_T *card = (CPC_USB_T *) file->private_data;
	CPC_USB_WRITE_URB_T *wrUrb = NULL;

	ssize_t bytes_written = 0;
	int retval = 0;
	int j;

	unsigned char *obuf = NULL;
	unsigned char type = 0;
	CPC_MSG_T *info = NULL;

	dbg("%s - entered minor %d, count = %d, present = %d",
	    __FUNCTION__, card->minor, (int)count, card->present);

	if (count > sizeof(CPC_MSG_T))
		return CPC_ERR_UNKNOWN;

	/* check if can read from the given address */
	if (!access_ok(VERIFY_READ, buffer, count))
		return CPC_ERR_UNKNOWN;

	/* lock this object */
	down(&card->sem);

	/* verify that the device wasn't unplugged */
	if (!card->present) {
		retval = CPC_ERR_NO_INTERFACE_PRESENT;
		goto exit;
	}

	/* verify that we actually have some data to write */
	if (count == 0) {
		dbg("%s - write request of 0 bytes", __FUNCTION__);
		goto exit;
	}

	if (card->free_slots <= 5) {
		info = (CPC_MSG_T *) buffer;

		if (info->type != CPC_CMD_T_CLEAR_CMD_QUEUE
		    || card->free_slots <= 0) {
			dbg("%s - send buffer full please try again %d",
			    __FUNCTION__, card->free_slots);
			retval = CPC_ERR_CAN_NO_TRANSMIT_BUF;
			goto exit;
		}
	}

	/* Find a free write urb */
	for (j = 0; j < CPC_USB_URB_CNT; j++) {
		if (!atomic_read(&card->wrUrbs[j].busy)) {
			wrUrb = &card->wrUrbs[j];	/* remember found URB */
			atomic_set(&wrUrb->busy, 1);	/* lock this URB      */
			init_completion(&wrUrb->finished);	/* init completion    */
			dbg("WR URB no. %d started", j);
			break;
		}
	}

	/* no free write urb found, report an error */
	if (!wrUrb) {
		dbg("%s - no free send urb available", __FUNCTION__);
		retval = CPC_ERR_CAN_NO_TRANSMIT_BUF;
		goto exit;
	}
	dbg("URB write req");

	obuf = (unsigned char *) wrUrb->urb->transfer_buffer;

	/* copy the data from userspace into our transfer buffer;
	 * this is the only copy required.
	 */
	if (copy_from_user(&obuf[4], buffer, count) != 0) {
		atomic_set(&wrUrb->busy, 0);	/* release urb */
		retval = CPC_ERR_IO_TRANSFER;
		goto exit;
	}

	/* check if it is a DRIVER information message, so we can
	 * respond to it ourselves instead of passing it on to the USB
	 * hardware
	 */
	info = (CPC_MSG_T *)&obuf[4];

	bytes_written = 11 + info->length;
	if (bytes_written >= wrUrb->size) {
		retval = CPC_ERR_IO_TRANSFER;
		goto exit;
	}

	switch (info->type) {
	case CPC_CMD_T_CLEAR_MSG_QUEUE:
		ResetBuffer(card->chan);
		break;

	case CPC_CMD_T_INQ_MSG_QUEUE_CNT:
		retval = cpc_get_buffer_count(card->chan);
		atomic_set(&wrUrb->busy, 0);

		goto exit;

	case CPC_CMD_T_INQ_INFO:
		if (info->msg.info.source == CPC_INFOMSG_T_DRIVER) {
			/* release urb cause we'll use it for driver
			 * information
			 */
			atomic_set(&wrUrb->busy, 0);
			if (IsBufferFull(card->chan)) {
				retval = CPC_ERR_IO_TRANSFER;
				goto exit;
			}

			/* it is a driver information request message and we have
			 * free rx slots to store the response
			 */
			type = info->msg.info.type;
			info = &card->chan->buf[card->chan->iidx];

			info->type = CPC_MSG_T_INFO;
			info->msg.info.source = CPC_INFOMSG_T_DRIVER;
			info->msg.info.type = type;

			switch (type) {
			case CPC_INFOMSG_T_VERSION:
				info->length = strlen(CPC_DRIVER_VERSION) + 2;
				sprintf(info->msg.info.msg, "%s\n",
					CPC_DRIVER_VERSION);
				break;

			case CPC_INFOMSG_T_SERIAL:
				info->length = strlen(CPC_DRIVER_SERIAL) + 2;
				sprintf(info->msg.info.msg, "%s\n",
					CPC_DRIVER_SERIAL);
				break;

			default:
				info->length = 2;
				info->msg.info.type =
				    CPC_INFOMSG_T_UNKNOWN_TYPE;
			}

			card->chan->WnR = 0;
			card->chan->iidx =
			    (card->chan->iidx + 1) % CPC_MSG_BUF_CNT;

			retval = info->length;
			goto exit;
		}
		break;
	case CPC_CMD_T_CAN_PRMS:
		/* Check the controller type. If it's the new CPC-USB, make sure if these are SJA1000 params */
		if (info->msg.canparams.cc_type != SJA1000
		    && info->msg.canparams.cc_type != M16C_BASIC
		    && (card->productId == USB_CPCUSB_LPC2119_PRODUCT_ID
			&& info->msg.canparams.cc_type != SJA1000)) {
			/* don't forget to release the urb */
			atomic_set(&wrUrb->busy, 0);
			retval = CPC_ERR_WRONG_CONTROLLER_TYPE;
			goto exit;
		}
		break;
	}

	/* just convert the params if it is an old CPC-USB with M16C controller */
	if (card->productId == USB_CPCUSB_M16C_PRODUCT_ID) {
		/* if it is a parameter message convert it from SJA1000 controller
		 * settings to M16C Basic controller settings
		 */
		SJA1000_TO_M16C_BASIC_Params((CPC_MSG_T *)&obuf[4]);
	}

	/* don't forget the byte alignment */
	cpcusb_align_buffer_alignment(&obuf[4]);

	/* set up the 4-byte header */
	obuf[0] = obuf[1] = obuf[2] = obuf[3] = 0;

	/* this urb was already set up, except for this write size */
	wrUrb->urb->transfer_buffer_length = bytes_written + 4;

	/* send the data out the bulk port */
	/* a character device write uses GFP_KERNEL,
	   unless a spinlock is held */
	retval = usb_submit_urb(wrUrb->urb, GFP_KERNEL);
	if (retval) {
		atomic_set(&wrUrb->busy, 0);	/* release urb */
		err("%s - failed submitting write urb, error %d",
		    __FUNCTION__, retval);
	} else {
		retval = bytes_written;
	}

exit:
	/* unlock the device */
	up(&card->sem);

	dbg("%s - leaved", __FUNCTION__);

	return retval;
}
Example #10
static int wmt_i2c_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct wmt_i2c_dev *i2c_dev;
	struct i2c_adapter *adap;
	struct resource *res;
	int err;
	u32 clk_rate;

	i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
	if (!i2c_dev) {
		dev_err(&pdev->dev, "device memory allocation failed\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	i2c_dev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(i2c_dev->base))
		return PTR_ERR(i2c_dev->base);

	i2c_dev->irq = irq_of_parse_and_map(np, 0);
	if (!i2c_dev->irq) {
		dev_err(&pdev->dev, "irq missing or invalid\n");
		return -EINVAL;
	}

	i2c_dev->clk = of_clk_get(np, 0);
	if (IS_ERR(i2c_dev->clk)) {
		dev_err(&pdev->dev, "unable to request clock\n");
		return PTR_ERR(i2c_dev->clk);
	}

	i2c_dev->mode = I2C_MODE_STANDARD;
	err = of_property_read_u32(np, "clock-frequency", &clk_rate);
	if ((!err) && (clk_rate == 400000))
		i2c_dev->mode = I2C_MODE_FAST;

	i2c_dev->dev = &pdev->dev;

	err = devm_request_irq(&pdev->dev, i2c_dev->irq, wmt_i2c_isr, 0,
							"i2c", i2c_dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq %i\n", i2c_dev->irq);
		return err;
	}

	adap = &i2c_dev->adapter;
	i2c_set_adapdata(adap, i2c_dev);
	strlcpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
	adap->owner = THIS_MODULE;
	adap->algo = &wmt_i2c_algo;
	adap->dev.parent = &pdev->dev;
	adap->dev.of_node = pdev->dev.of_node;

	init_completion(&i2c_dev->complete);

	err = wmt_i2c_reset_hardware(i2c_dev);
	if (err) {
		dev_err(&pdev->dev, "error initializing hardware\n");
		return err;
	}

	err = i2c_add_adapter(adap);
	if (err) {
		dev_err(&pdev->dev, "failed to add adapter\n");
		return err;
	}

	platform_set_drvdata(pdev, i2c_dev);

	return 0;
}
Example #11
/* Called from HCI core to initialize the device */
static int ti_st_open(struct hci_dev *hdev)
{
	unsigned long timeleft;
	struct ti_st *hst;
	int err, i;

	BT_DBG("%s %p", hdev->name, hdev);

	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	/* provide contexts for callbacks from ST */
	hst = hci_get_drvdata(hdev);

	for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
		ti_st_proto[i].priv_data = hst;
		ti_st_proto[i].max_frame_size = HCI_MAX_FRAME_SIZE;
		ti_st_proto[i].recv = st_receive;
		ti_st_proto[i].reg_complete_cb = st_reg_completion_cb;

		/* Prepare wait-for-completion handler */
		init_completion(&hst->wait_reg_completion);
		/* Reset ST registration callback status flag,
		 * this value will be updated in
		 * st_reg_completion_cb()
		 * function whenever it called from ST driver.
		 */
		hst->reg_status = -EINPROGRESS;

		err = st_register(&ti_st_proto[i]);
		if (!err)
			goto done;

		if (err != -EINPROGRESS) {
			clear_bit(HCI_RUNNING, &hdev->flags);
			BT_ERR("st_register failed %d", err);
			return err;
		}

		/* ST is busy with either protocol
		 * registration or firmware download.
		 */
		BT_DBG("waiting for registration "
				"completion signal from ST");
		timeleft = wait_for_completion_timeout
			(&hst->wait_reg_completion,
			 msecs_to_jiffies(BT_REGISTER_TIMEOUT));
		if (!timeleft) {
			clear_bit(HCI_RUNNING, &hdev->flags);
			BT_ERR("Timeout(%d sec),didn't get reg "
					"completion signal from ST",
					BT_REGISTER_TIMEOUT / 1000);
			return -ETIMEDOUT;
		}

		/* Is ST registration callback
		 * called with ERROR status? */
		if (hst->reg_status != 0) {
			clear_bit(HCI_RUNNING, &hdev->flags);
			BT_ERR("ST registration completed with invalid "
					"status %d", hst->reg_status);
			return -EAGAIN;
		}

done:
		hst->st_write = ti_st_proto[i].write;
		if (!hst->st_write) {
			BT_ERR("undefined ST write function");
			clear_bit(HCI_RUNNING, &hdev->flags);
			for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
				/* Undo registration with ST */
				err = st_unregister(&ti_st_proto[i]);
				if (err)
					BT_ERR("st_unregister() failed with "
							"error %d", err);
				hst->st_write = NULL;
			}
			return -EIO;
		}
	}
	return 0;
}
Example #12
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	struct dma_slave_config cfg;
	dma_cap_mask_t mask;
	int ret;

	host->dma_active = false;

	if (!pdata)
		return;

	if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
					    (void *)pdata->slave_id_tx);
	dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (!host->chan_tx)
		return;

	cfg.slave_id = pdata->slave_id_tx;
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = res->start + MMCIF_CE_DATA;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto ecfgtx;

	host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
					    (void *)pdata->slave_id_rx);
	dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (!host->chan_rx)
		goto erqrx;

	cfg.slave_id = pdata->slave_id_rx;
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = res->start + MMCIF_CE_DATA;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto ecfgrx;

	init_completion(&host->dma_complete);

	return;

ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
erqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
Example #13
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int ring_size =
		((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev;

	net_device = alloc_net_device(device);
	if (!net_device)
		return -ENOMEM;

	net_device->ring_size = ring_size;

	/*
	 * Coming into this function, struct net_device * is
	 * registered as the driver private data.
	 * In alloc_net_device(), we register struct netvsc_device *
	 * as the driver private data and stash away struct net_device *
	 * in struct netvsc_device *.
	 */
	ndev = net_device->ndev;

	/* Add netvsc_device context to netvsc_device */
	net_device->nd_ctx = netdev_priv(ndev);

	/* Initialize the NetVSC channel extension */
	init_completion(&net_device->channel_init_wait);

	set_per_channel_state(device->channel, net_device->cb_buffer);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device->channel);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully\n");

	net_device->chn_table[0] = device->channel;

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	free_netvsc_device(net_device);

	return ret;
}
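Example #14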
int bcm2835_audio_close(bcm2835_alsa_stream_t * alsa_stream)
{
	VC_AUDIO_MSG_T m;
	AUDIO_INSTANCE_T *instance = alsa_stream->instance;
	int32_t success;
	int ret;
	LOG_DBG(" .. IN\n");

	my_workqueue_quit(alsa_stream);

	if (mutex_lock_interruptible(&instance->vchi_mutex)) {
		LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
			instance->num_connections);
		return -EINTR;
	}
	vchi_service_use(instance->vchi_handle[0]);

	m.type = VC_AUDIO_MSG_TYPE_CLOSE;

	/* Create the message available completion */
	init_completion(&instance->msg_avail_comp);

	/* Send the message to the videocore */
	success = vchi_msg_queue(instance->vchi_handle[0],
				 &m, sizeof m,
				 VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);

	if (success != 0) {
		LOG_ERR("%s: failed on vchi_msg_queue (status=%d)",
			__func__, success);
		ret = -1;
		goto unlock;
	}

	ret = wait_for_completion_interruptible(&instance->msg_avail_comp);
	if (ret) {
		LOG_ERR("%s: failed on waiting for event (status=%d)",
			__func__, ret);
		goto unlock;
	}
	if (instance->result != 0) {
		LOG_ERR("%s: failed result (status=%d)",
			__func__, instance->result);

		ret = -1;
		goto unlock;
	}

	ret = 0;

unlock:
	vchi_service_release(instance->vchi_handle[0]);
	mutex_unlock(&instance->vchi_mutex);

	/* Stop the audio service */
	if (instance) {
		vc_vchi_audio_deinit(instance);
		alsa_stream->instance = NULL;
	}
	LOG_DBG(" .. OUT\n");
	return ret;
}
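Both bcm2835 functions above re-arm one long-lived completion with init_completion() before every message and sleep until the reply callback fires complete(). A minimal sketch of that request/reply shape follows; struct inst and queue_msg() are hypothetical, and on kernels 3.13 and later reinit_completion() is the preferred way to re-arm an already-initialized completion.

#include <linux/completion.h>

struct inst {				/* hypothetical instance state */
	struct completion msg_avail_comp;
	int result;			/* filled in by the reply handler */
};

/* hypothetical transport; its reply handler calls complete() */
int queue_msg(struct inst *inst, void *msg, size_t len);

static int send_and_wait(struct inst *inst, void *msg, size_t len)
{
	int ret;

	init_completion(&inst->msg_avail_comp);	/* re-arm before sending */
	ret = queue_msg(inst, msg, len);
	if (ret)
		return ret;
	ret = wait_for_completion_interruptible(&inst->msg_avail_comp);
	if (ret)
		return ret;		/* -ERESTARTSYS on a signal */
	return inst->result;		/* reply status set by the handler */
}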
Example #15
static int msm_ispif_restart_frame_boundary(struct ispif_device *ispif,
	struct msm_ispif_param_data *params)
{
	int rc = 0, i;
	long timeout = 0;
	uint16_t cid_mask;
	enum msm_ispif_intftype intftype;
	enum msm_ispif_vfe_intf vfe_intf;
	uint32_t vfe_mask = 0;
	uint32_t intf_addr;

	if (ispif->ispif_state != ISPIF_POWER_UP) {
		pr_err("%s: ispif invalid state %d\n", __func__,
			ispif->ispif_state);
		rc = -EPERM;
		return rc;
	}
	if (params->num > MAX_PARAM_ENTRIES) {
		pr_err("%s: invalid param entries %d\n", __func__,
			params->num);
		rc = -EINVAL;
		return rc;
	}

	for (i = 0; i < params->num; i++) {
		vfe_intf = params->entries[i].vfe_intf;
		if (vfe_intf >= VFE_MAX) {
			pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
				__LINE__, i, vfe_intf);
			return -EINVAL;
		}
		vfe_mask |= (1 << vfe_intf);
	}

	/* Turn ON regulators before enabling the clocks*/
	rc = msm_ispif_set_regulator(ispif, 1);
	if (rc < 0) {
		pr_err("%s: ispif enable regulator failed", __func__);
		return -EFAULT;
	}

	rc = msm_cam_clk_enable(&ispif->pdev->dev,
		ispif_clk_info, ispif->clk,
		ispif->num_clk, 1);
	if (rc < 0) {
		pr_err("%s: cannot enable clock, error = %d",
			__func__, rc);
		goto disable_regulator;
	}

	if (vfe_mask & (1 << VFE0)) {
		init_completion(&ispif->reset_complete[VFE0]);
		/* initiate reset of ISPIF */
		msm_camera_io_w(0x00001FF9,
				ispif->base + ISPIF_RST_CMD_ADDR);
	}

	if (vfe_mask & (1 << VFE0)) {
		timeout = wait_for_completion_timeout(
			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
		if (timeout <= 0) {
			pr_err("%s: VFE0 reset wait timeout\n", __func__);
			rc = -ETIMEDOUT;
			goto disable_clk;
		}
	}

	if (ispif->hw_num_isps > 1 && (vfe_mask & (1 << VFE1))) {
		init_completion(&ispif->reset_complete[VFE1]);
		msm_camera_io_w(0x00001FF9,
			ispif->base + ISPIF_RST_CMD_1_ADDR);
	}

	if (ispif->hw_num_isps > 1 && (vfe_mask & (1 << VFE1))) {
		timeout = wait_for_completion_timeout(
				&ispif->reset_complete[VFE1],
				msecs_to_jiffies(500));
		if (timeout <= 0) {
			pr_err("%s: VFE1 reset wait timeout\n", __func__);
			rc = -ETIMEDOUT;
			goto disable_clk;
		}
	}

	pr_info("%s: ISPIF reset hw done, Restarting", __func__);
	rc = msm_cam_clk_enable(&ispif->pdev->dev,
		ispif_clk_info, ispif->clk,
		ispif->num_clk, 0);
	if (rc < 0) {
		pr_err("%s: cannot disable clock, error = %d",
			__func__, rc);
		goto disable_regulator;
	}
	/* Turn OFF regulators after disabling clocks */
	rc = msm_ispif_set_regulator(ispif, 0);
	if (rc < 0) {
		pr_err("%s: ispif disable regulator failed", __func__);
		rc = -EFAULT;
		goto end;
	}

	for (i = 0; i < params->num; i++) {
		intftype = params->entries[i].intftype;
		vfe_intf = params->entries[i].vfe_intf;

		switch (intftype) {
		case PIX0:
			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
			break;
		case RDI0:
			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
			break;
		case PIX1:
			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
			break;
		case RDI1:
			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
			break;
		case RDI2:
			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
			break;
		default:
			pr_err("%s: invalid intftype=%d\n", __func__,
			params->entries[i].intftype);
			rc = -EPERM;
			goto end;
		}

		msm_ispif_intf_cmd(ispif,
			ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
	}

	for (i = 0; i < params->num; i++) {
		intftype = params->entries[i].intftype;

		vfe_intf = params->entries[i].vfe_intf;


		cid_mask = msm_ispif_get_cids_mask_from_cfg(
			&params->entries[i]);

		msm_ispif_enable_intf_cids(ispif, intftype,
			cid_mask, vfe_intf, 1);
	}
	return rc;

disable_clk:
	msm_cam_clk_enable(&ispif->pdev->dev,
		ispif_clk_info, ispif->clk,
		ispif->num_clk, 0);
disable_regulator:
	/* Turn OFF regulators */
	msm_ispif_set_regulator(ispif, 0);
end:
	return rc;
}
Example #16
static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_imx_master *mxc_platform_info =
			dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int i, ret, irq;

	if (!np && !mxc_platform_info) {
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = np ? -1 : pdev->id;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;
	spi_imx->dev = &pdev->dev;

	spi_imx->devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;

	if (mxc_platform_info) {
		master->num_chipselect = mxc_platform_info->num_chipselect;
		master->cs_gpios = devm_kzalloc(&master->dev,
			sizeof(int) * master->num_chipselect, GFP_KERNEL);
		if (!master->cs_gpios)
			return -ENOMEM;

		for (i = 0; i < master->num_chipselect; i++)
			master->cs_gpios[i] = mxc_platform_info->chipselect[i];
	}

	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx))
		spi_imx->bitbang.master->mode_bits |= SPI_LOOP;

	init_completion(&spi_imx->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}
	spi_imx->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx6 now, can remove the constraint if
	 * validated on other chips.
	 */
	if (is_imx51_ecspi(spi_imx)) {
		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
		if (ret == -EPROBE_DEFER)
			goto out_clk_put;

		if (ret < 0)
			dev_err(&pdev->dev, "dma setup error %d, use pio\n",
				ret);
	}

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	if (!master->cs_gpios) {
		dev_err(&pdev->dev, "No CS GPIOs available\n");
		ret = -EINVAL;
		goto out_clk_put;
	}

	for (i = 0; i < master->num_chipselect; i++) {
		if (!gpio_is_valid(master->cs_gpios[i]))
			continue;

		ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
					DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
				master->cs_gpios[i]);
			goto out_clk_put;
		}
	}

	dev_info(&pdev->dev, "probed\n");

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return ret;

out_clk_put:
	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}
Example #17
/*
 * =====================rsz_open===========================
 * This function creates a channel.
 */
static int rsz_open(struct inode *inode, struct file *filp)
{
	struct imp_logical_channel *rsz_conf_chan;
	int i, mode, ret;

	if (filp->f_flags & O_NONBLOCK) {
		dev_err(rsz_device,
			"rsz_open: device cannot be opened in non-blocking mode\n");
		return -EBUSY;
	}

	mode = imp_hw_if->get_resize_oper_mode();

	ret = mutex_lock_interruptible(&rsz_dev.lock);
	if (ret)
		return ret;
	if ((mode == IMP_MODE_CONTINUOUS) ||
	    ((mode == IMP_MODE_SINGLE_SHOT) && (!imp_hw_if->serialize()))) {
		if (rsz_dev.users != 0) {
			dev_err(rsz_device,
				"\n mode doesn't allow multiple instances\n");
			mutex_unlock(&rsz_dev.lock);
			return -EBUSY;
		}
	}

	/* if usage counter is greater than maximum supported channels
	   return error */
	if (rsz_dev.users >= MAX_CHANNELS) {
		dev_err(rsz_device,
			"\n modules usage count is greater than supported ");
		mutex_unlock(&rsz_dev.lock);
		return -EBUSY;
	}

	rsz_dev.users++;
	mutex_unlock(&rsz_dev.lock);
	/* allocate memory for a new configuration */
	rsz_conf_chan = kmalloc(sizeof(struct imp_logical_channel), GFP_KERNEL);

	if (rsz_conf_chan == NULL) {
		dev_err(rsz_device,
			"\n cannot allocate memory for channel config");
		return -ENOMEM;
	}

	rsz_conf_chan->config_state = STATE_NOT_CONFIGURED;
	rsz_conf_chan->mode = IMP_MODE_INVALID;
	rsz_conf_chan->primary_user = 0;
	rsz_conf_chan->chained = 0;
	rsz_conf_chan->config = NULL;
	rsz_conf_chan->user_config = NULL;
	rsz_conf_chan->user_config_size = 0;

	/* Set priority to lowest for that configuration channel */
	rsz_conf_chan->priority = MIN_PRIORITY;

	/* Set the channel type to resize */
	rsz_conf_chan->type = IMP_RESIZER;

	for (i = 0; i < MAX_BUFFERS; i++) {
		rsz_conf_chan->in_bufs[i] = NULL;
		rsz_conf_chan->out_buf1s[i] = NULL;
		rsz_conf_chan->out_buf2s[i] = NULL;
	}
	rsz_conf_chan->in_numbufs = 0;
	rsz_conf_chan->out_numbuf1s = 0;
	rsz_conf_chan->out_numbuf2s = 0;

	dev_dbg(rsz_device, "Initializing	of channel done	\n");

	/* Initialize the channel completion (used as an application lock) */
	init_completion(&(rsz_conf_chan->channel_sem));
	rsz_conf_chan->channel_sem.done = 0;
	mutex_init(&(rsz_conf_chan->lock));
	/* store the configuration structure in private data */
	filp->private_data = rsz_conf_chan;

	return 0;
}
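Example #18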
static int hid_time_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
	struct hid_time_state *time_state = devm_kzalloc(&pdev->dev,
		sizeof(struct hid_time_state), GFP_KERNEL);

	if (time_state == NULL)
		return -ENOMEM;

	platform_set_drvdata(pdev, time_state);

	spin_lock_init(&time_state->lock_last_time);
	init_completion(&time_state->comp_last_time);
	time_state->common_attributes.hsdev = hsdev;
	time_state->common_attributes.pdev = pdev;

	ret = hid_sensor_parse_common_attributes(hsdev,
				HID_USAGE_SENSOR_TIME,
				&time_state->common_attributes);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup common attributes!\n");
		return ret;
	}

	ret = hid_time_parse_report(pdev, hsdev, HID_USAGE_SENSOR_TIME,
					time_state);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup attributes!\n");
		return ret;
	}

	time_state->callbacks.send_event = hid_time_proc_event;
	time_state->callbacks.capture_sample = hid_time_capture_sample;
	time_state->callbacks.pdev = pdev;
	ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_TIME,
					&time_state->callbacks);
	if (ret < 0) {
		dev_err(&pdev->dev, "register callback failed!\n");
		return ret;
	}

	ret = sensor_hub_device_open(hsdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to open sensor hub device!\n");
		goto err_open;
	}

	/*
	 * Enable HID input processing early in order to be able to read the
	 * clock already in devm_rtc_device_register().
	 */
	hid_device_io_start(hsdev->hdev);

	time_state->rtc = devm_rtc_device_register(&pdev->dev,
					"hid-sensor-time", &hid_time_rtc_ops,
					THIS_MODULE);

	if (IS_ERR_OR_NULL(time_state->rtc)) {
		hid_device_io_stop(hsdev->hdev);
		ret = time_state->rtc ? PTR_ERR(time_state->rtc) : -ENODEV;
		time_state->rtc = NULL;
		dev_err(&pdev->dev, "rtc device register failed!\n");
		goto err_rtc;
	}

	return ret;

err_rtc:
	sensor_hub_device_close(hsdev);
err_open:
	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
	return ret;
}
Example #19
static int ar9170_usb_probe(struct usb_interface *intf,
                            const struct usb_device_id *id)
{
    struct ar9170_usb *aru;
    struct ar9170 *ar;
    struct usb_device *udev;
    int err;

    aru = ar9170_alloc(sizeof(*aru));
    if (IS_ERR(aru)) {
        err = PTR_ERR(aru);
        goto out;
    }

    udev = interface_to_usbdev(intf);
    usb_get_dev(udev);
    aru->udev = udev;
    aru->intf = intf;
    ar = &aru->common;

    aru->req_one_stage_fw = ar9170_requires_one_stage(id);

    usb_set_intfdata(intf, aru);
    SET_IEEE80211_DEV(ar->hw, &intf->dev);

    init_usb_anchor(&aru->rx_submitted);
    init_usb_anchor(&aru->tx_pending);
    init_usb_anchor(&aru->tx_submitted);
    init_completion(&aru->cmd_wait);
    spin_lock_init(&aru->tx_urb_lock);

    aru->tx_pending_urbs = 0;
    aru->tx_submitted_urbs = 0;

    aru->common.stop = ar9170_usb_stop;
    aru->common.flush = ar9170_usb_flush;
    aru->common.open = ar9170_usb_open;
    aru->common.tx = ar9170_usb_tx;
    aru->common.exec_cmd = ar9170_usb_exec_cmd;
    aru->common.callback_cmd = ar9170_usb_callback_cmd;

#ifdef CONFIG_PM
    udev->reset_resume = 1;
#endif /* CONFIG_PM */
    err = ar9170_usb_reset(aru);
    if (err)
        goto err_freehw;

    err = ar9170_usb_request_firmware(aru);
    if (err)
        goto err_freehw;

    err = ar9170_usb_init_device(aru);
    if (err)
        goto err_freefw;

    err = ar9170_usb_open(ar);
    if (err)
        goto err_unrx;

    err = ar9170_register(ar, &udev->dev);

    ar9170_usb_stop(ar);
    if (err)
        goto err_unrx;

    return 0;

err_unrx:
    ar9170_usb_cancel_urbs(aru);

err_freefw:
    release_firmware(aru->init_values);
    release_firmware(aru->firmware);

err_freehw:
    usb_set_intfdata(intf, NULL);
    usb_put_dev(udev);
    ieee80211_free_hw(ar->hw);
out:
    return err;
}
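Example #20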
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
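A minimal caller sketch for the two helpers above (my_worker and the thread name are made up): the thread is created stopped, may be bound to a CPU while still stopped, and only runs once wake_up_process() is called, exactly as the kernel-doc describes.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/topology.h>

static int my_worker(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* periodic work */
	return 0;	/* handed back to kthread_stop() */
}

static int start_worker_on(unsigned int cpu)
{
	struct task_struct *task;

	task = kthread_create_on_node(my_worker, NULL, cpu_to_node(cpu),
				      "my_worker/%u", cpu);
	if (IS_ERR(task))
		return PTR_ERR(task);
	kthread_bind(task, cpu);	/* cpu must be possible, need not be online */
	wake_up_process(task);		/* thread was created stopped */
	return 0;
}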
Example #21
static int ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long args)
{
	int ret;
	unsigned int __user *argp = (unsigned int __user *) args;
	unsigned long physp;
	unsigned long virtp;
	unsigned int type;

	//__D("ioctl %d received. \n", cmd);

	switch (cmd) {
	case IPERA_INIT_PMU_STATICS:
		init_pmu_asm();
		__D("IPERA_INIT_PMU_STATICS : returning\n");
		break;

	case IPERA_START_PMU_CACHES_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));

		//set_pmu_event_asm(PMU_ICACHE_EXEC, EVENT_COUNTER0);
		//set_pmu_event_asm(PMU_ICACHE_MISS, EVENT_COUNTER1);
		//set_pmu_event_asm(PMU_DCACHE_ACCESS, EVENT_COUNTER2);
		//set_pmu_event_asm(PMU_DCACHE_MISS, EVENT_COUNTER3);
		//start_pmu_asm();
		//__D("IPERA_START_PMU_CACHES_STATICS : returning\n");
		break;

	case IPERA_END_PMU_CACHES_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));
		
		//stop_pmu_asm();
		//pmu_statics[type].pmu_count		+= 1; 
		//pmu_statics[type].pmu_cycles    += get_clock_counter_asm();
		//pmu_statics[type].pmu_instr_exec    += get_pmnx_counter_asm(EVENT_COUNTER0);
		//pmu_statics[type].pmu_icache_miss    += get_pmnx_counter_asm(EVENT_COUNTER1);
		//pmu_statics[type].pmu_dcache_access    += get_pmnx_counter_asm(EVENT_COUNTER2);
		//pmu_statics[type].pmu_dcache_miss    += get_pmnx_counter_asm(EVENT_COUNTER3);
		//__D("IPERA_END_PMU_CACHES_STATICS : returning\n");
		break;
		
	case IPERA_GET_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));
		
		//ret_get_cycles = pmu_statics[type].pmu_cycles;
		//__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_count);
		//__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_cycles);
		//__D("IPERA_GET_ICACHE_EXEC : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_instr_exec);
		//__D("IPERA_GET_ICACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_icache_miss);
		//__D("IPERA_GET_DCACHE_ACCESS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_access);
		//__D("IPERA_GET_DCACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_miss);
		//ret = copy_to_user(argp, &pmu_statics[type], sizeof(pmu_statics[type]));
		break;

	case IPERA_GET_PHYS:
		get_user(virtp, argp);
		physp = ipera_get_phys(virtp);
		put_user(physp, argp);
		//__D("IPERA_GET_PHYS : returning %#lx\n", physp);
		break;

		
#if 0
	case IPERA_GET_CYCLES:
		__D("IPERA_GET_CYCLES : received.\n");
		cur_cycles = get_cycles();
		copy_to_user(argp, &cur_cycles, sizeof(cur_cycles));
		__D("IPERA_GET_CYCLES : returning %#lx\n", cur_cycles);
		break;

	case IPERA_GET_PHYS:
		__D("IPERA_GET_PHYS : received.\n");
		get_user(virtp, argp);
		physp = get_phys(virtp);
		put_user(physp, argp);
		__D("IPERA_GET_PHYS : returning %#lx\n", physp);
		break;

    case IPERA_DMACPY:
        __D("IPERA_DMACPY : received.\n");
        if (copy_from_user(&dma, argp, sizeof(dma))) {
            return -EFAULT;
        }
        err = davinci_request_dma(DM350_DMA_CHANNEL_ANY, "EDMA memcpy", memcpy_dma_irq_handler, NULL, &master_ch, &tcc, EVENTQ_1);
        if (err < 0) {
            __E("Error in requesting Master channel %d = 0x%x\n", master_ch, err);
            return err;
        } else if(master_ch != 25)  __E("get channel %d \n", master_ch);
        davinci_stop_dma(master_ch);

        init_completion(&edmacompletion);
        davinci_set_dma_src_params(master_ch, (unsigned long) edmaparams.src, edmaparams.srcmode, edmaparams.srcfifowidth);
        davinci_set_dma_dest_params(master_ch, (unsigned long) edmaparams.dst, edmaparams.dstmode, edmaparams.dstfifowidth);
        davinci_set_dma_src_index(master_ch, edmaparams.srcbidx, edmaparams.srccidx);
        davinci_set_dma_dest_index(master_ch, edmaparams.dstbidx, edmaparams.dstcidx);
        davinci_set_dma_transfer_params(master_ch, edmaparams.acnt, edmaparams.bcnt, edmaparams.ccnt, edmaparams.bcntrld, edmaparams.syncmode);
        davinci_get_dma_params(master_ch, &paramentry);
        davinci_set_dma_params(master_ch, &paramentry);
        davinci_start_dma(master_ch);
        wait_for_completion(&edmacompletion);
        //printk("Dma completed... \n");
        davinci_stop_dma(master_ch);
        davinci_free_dma(master_ch);
        break;
#endif

	default:
		__E("Unknown ioctl received = %d.\n", cmd);
		return -EINVAL;
	}

	return 0;
}
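A hedged userspace sketch for the IPERA_GET_PHYS command handled above: the
caller passes a virtual address and reads the physical address back through the
same word. The device node name /dev/ipera and the visibility of the IPERA_*
ioctl numbers to userspace are assumptions, not confirmed by this source.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/* Sketch: virtual-to-physical lookup via the driver's ioctl. */
	int lookup_phys(unsigned long virtp)
	{
		unsigned long arg = virtp;	/* in: virtual, out: physical */
		int fd = open("/dev/ipera", O_RDWR);	/* assumed node name */

		if (fd < 0)
			return -1;
		if (ioctl(fd, IPERA_GET_PHYS, &arg) < 0) {
			close(fd);
			return -1;
		}
		printf("virt %#lx -> phys %#lx\n", virtp, arg);
		close(fd);
		return 0;
	}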
Example #22
static int set_guid_rec(struct ib_device *ibdev,
			u8 port, int index,
			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
	int err;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_guidinfo_rec guid_info_rec;
	ib_sa_comp_mask comp_mask;
	struct ib_port_attr attr;
	struct mlx4_alias_guid_work_context *callback_context;
	unsigned long resched_delay, flags, flags1;
	struct list_head *head =
		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
	if (err) {
		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
			 err, port);
		return err;
	}
	/* Check the port was configured by the SM; otherwise there is no need to send. */
	if (attr.state != IB_PORT_ACTIVE) {
		pr_debug("port %d not active...rescheduling\n", port);
		resched_delay = 5 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}

	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
	if (!callback_context) {
		err = -ENOMEM;
		resched_delay = HZ * 5;
		goto new_schedule;
	}
	callback_context->port = port;
	callback_context->dev = dev;
	callback_context->block_num = index;

	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

	guid_info_rec.lid = cpu_to_be16(attr.lid);
	guid_info_rec.block_num = index;

	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
		rec_det->guid_indexes;

	init_completion(&callback_context->done);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	list_add_tail(&callback_context->list, head);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

	callback_context->query_id =
		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
					  ibdev, port, &guid_info_rec,
					  comp_mask, rec_det->method, 1000,
					  GFP_KERNEL, aliasguid_query_handler,
					  callback_context,
					  &callback_context->sa_query);
	if (callback_context->query_id < 0) {
		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
			 "%d. will reschedule to the next 1 sec.\n",
			 callback_context->query_id);
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
		list_del(&callback_context->list);
		kfree(callback_context);
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
		resched_delay = 1 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	err = 0;
	goto out;

new_schedule:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	invalidate_guid_record(dev, port, index);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   resched_delay);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
	return err;
}
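The function above only arms the completion and queues the query; the matching
complete() call belongs in the SA callback path. A sketch of that side, modeled
on what aliasguid_query_handler has to do (the exact handler body is an
assumption):

	/* Sketch: the SA callback releases whoever waits on cb_ctx->done. */
	static void sketch_guid_query_done(struct mlx4_alias_guid_work_context *cb_ctx)
	{
		/* ... record the returned GUID block first ... */
		complete(&cb_ctx->done);	/* pairs with init_completion() above */
	}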
Example #23
static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}
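The pattern here pairs an atomic todo counter with a single completion. The
matching signal side (modeled on the kernel's cpu_stop_signal_done) lets the
last stopper to finish wake the waiter:

	/* Sketch: each stopper calls this once its work item is done. */
	static void sketch_stop_signal_done(struct cpu_stop_done *done)
	{
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}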
Example #24
/**
 * i2c_pnx_xfer - generic transfer entry point
 * @adap:		pointer to I2C adapter structure
 * @msgs:		array of messages
 * @num:		number of messages
 *
 * Initiates the transfer
 */
static int
i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_msg *pmsg;
	int rc = 0, completed = 0, i;
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	u32 stat = ioread32(I2C_REG_STS(alg_data));

	dev_dbg(&alg_data->adapter.dev,
		"%s(): entering: %d messages, stat = %04x.\n",
		__func__, num, ioread32(I2C_REG_STS(alg_data)));

	bus_reset_if_active(alg_data);

	/* Process transactions in a loop. */
	for (i = 0; rc >= 0 && i < num; i++) {
		u8 addr;

		pmsg = &msgs[i];
		addr = pmsg->addr;

		if (pmsg->flags & I2C_M_TEN) {
			dev_err(&alg_data->adapter.dev,
				"%s: 10 bits addr not supported!\n",
				alg_data->adapter.name);
			rc = -EINVAL;
			break;
		}

		alg_data->mif.buf = pmsg->buf;
		alg_data->mif.len = pmsg->len;
		alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ?
			I2C_SMBUS_READ : I2C_SMBUS_WRITE;
		alg_data->mif.ret = 0;
		alg_data->last = (i == num - 1);

		dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
			__func__, alg_data->mif.mode, alg_data->mif.len);

		i2c_pnx_arm_timer(alg_data);

		/* initialize the completion var */
		init_completion(&alg_data->mif.complete);

		/* Enable master interrupt */
		iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_afie |
				mcntrl_naie | mcntrl_drmie,
			  I2C_REG_CTL(alg_data));

		/* Put start-code and slave-address on the bus. */
		rc = i2c_pnx_start(addr, alg_data);
		if (rc < 0)
			break;

		/* Wait for completion */
		wait_for_completion(&alg_data->mif.complete);

		if (!(rc = alg_data->mif.ret))
			completed++;
		dev_dbg(&alg_data->adapter.dev,
			"%s(): Complete, return code = %d.\n",
			__func__, rc);

		/* Clear TDI and AFI bits in case they are set. */
		if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) {
			dev_dbg(&alg_data->adapter.dev,
				"%s: TDI still set... clearing now.\n",
				alg_data->adapter.name);
			iowrite32(stat, I2C_REG_STS(alg_data));
		}
		if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_afi) {
			dev_dbg(&alg_data->adapter.dev,
				"%s: AFI still set... clearing now.\n",
				alg_data->adapter.name);
			iowrite32(stat, I2C_REG_STS(alg_data));
		}
	}

	bus_reset_if_active(alg_data);

	/* Cleanup to be sure... */
	alg_data->mif.buf = NULL;
	alg_data->mif.len = 0;

	dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	if (completed != num)
		return ((rc < 0) ? rc : -EREMOTEIO);

	return num;
}
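A short sketch of how this master_xfer hook is reached: client drivers call
i2c_transfer(), and the core dispatches to i2c_pnx_xfer() through
adap->algo->master_xfer. The 7-bit address 0x50 and the one-byte register /
two-byte value layout below are illustrative assumptions.

	/* Sketch: write a register index, then read two bytes back. */
	static int sketch_read_word(struct i2c_adapter *adap, u8 reg, u8 *val)
	{
		struct i2c_msg msgs[] = {
			{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
			{ .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = val },
		};
		int n = i2c_transfer(adap, msgs, 2);

		return (n == 2) ? 0 : (n < 0 ? n : -EREMOTEIO);
	}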
static int semc_battery_read_adc(int channel, int *read_measurement,
				int *read_physical)
{
	struct power_supply *ps = power_supply_get_by_name(SEMC_BDATA_NAME);
	struct data_info *di = container_of(ps, struct data_info, bdata_ps);
	int ret;
	void *h;
	struct adc_chan_result adc_chan_result;
	struct completion  conv_complete_evt;

	if (!read_measurement && !read_physical)
		return -EINVAL;

	dev_dbg(di->dev, "called for %d\n", channel);
	ret = adc_channel_open(channel, &h);
	if (ret) {
		dev_err(di->dev, "couldn't open channel %d ret=%d\n",
			channel, ret);
		goto out;
	}
	init_completion(&conv_complete_evt);
	ret = adc_channel_request_conv(h, &conv_complete_evt);
	if (ret) {
		dev_err(di->dev, "couldn't request conv channel %d ret=%d\n",
			channel, ret);
		adc_channel_close(h);
		goto out;
	}
	ret = wait_for_completion_interruptible(&conv_complete_evt);
	if (ret) {
		dev_err(di->dev, "wait interrupted channel %d ret=%d\n",
			channel, ret);
		adc_channel_close(h);
		goto out;
	}
	ret = adc_channel_read_result(h, &adc_chan_result);
	if (ret) {
		dev_err(di->dev, "couldn't read result channel %d ret=%d\n",
			channel, ret);
		adc_channel_close(h);
		goto out;
	}
	ret = adc_channel_close(h);
	if (ret)
		dev_err(di->dev, "couldn't close channel %d ret=%d\n",
			channel, ret);
	if (read_measurement) {
		*read_measurement = (int)adc_chan_result.measurement;
		dev_dbg(di->dev, "done for %d measurement=%d\n",
			channel, *read_measurement);
	}
	if (read_physical) {
		*read_physical = adc_chan_result.physical;
		dev_dbg(di->dev, "done for %d physical=%d\n",
			channel, *read_physical);
	}
	return ret;
out:
	dev_dbg(di->dev, "done for %d\n", channel);
	return ret;
}
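A hedged usage sketch for the helper above; the channel identifier
CHANNEL_ADC_VBATT is an assumption standing in for whatever the MSM ADC header
defines on this platform.

	int measurement, physical;
	int err = semc_battery_read_adc(CHANNEL_ADC_VBATT,
					&measurement, &physical);
	if (!err)
		pr_debug("vbatt measurement=%d physical=%d\n",
			 measurement, physical);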
Example #26
static int __devinit i2c_pnx_probe(struct platform_device *pdev)
{
	unsigned long tmp;
	int ret = 0;
	struct i2c_pnx_algo_data *alg_data;
	unsigned long freq;
	struct resource *res;
	u32 speed = I2C_PNX_SPEED_KHZ_DEFAULT * 1000;

	alg_data = kzalloc(sizeof(*alg_data), GFP_KERNEL);
	if (!alg_data) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	platform_set_drvdata(pdev, alg_data);

	alg_data->adapter.dev.parent = &pdev->dev;
	alg_data->adapter.algo = &pnx_algorithm;
	alg_data->adapter.algo_data = alg_data;
	alg_data->adapter.nr = pdev->id;

	alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT;
#ifdef CONFIG_OF
	alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node);
	if (pdev->dev.of_node) {
		of_property_read_u32(pdev->dev.of_node, "clock-frequency",
				     &speed);
		/*
		 * At this point, it is planned to add an OF timeout property.
		 * As soon as there is a consensus about how to name and handle
		 * it, something like the following can be put here:
		 *
		 * of_property_read_u32(pdev->dev.of_node, "timeout",
		 *                      &alg_data->timeout);
		 */
	}
#endif
	alg_data->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(alg_data->clk)) {
		ret = PTR_ERR(alg_data->clk);
		goto out_drvdata;
	}

	init_timer(&alg_data->mif.timer);
	alg_data->mif.timer.function = i2c_pnx_timeout;
	alg_data->mif.timer.data = (unsigned long)alg_data;

	snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
		 "%s", pdev->name);

	/* Register I/O resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Unable to get mem resource.\n");
		ret = -EBUSY;
		goto out_clkget;
	}
	if (!request_mem_region(res->start, I2C_PNX_REGION_SIZE,
				pdev->name)) {
		dev_err(&pdev->dev,
		       "I/O region 0x%08x for I2C already in use.\n",
		       res->start);
		ret = -ENOMEM;
		goto out_clkget;
	}

	alg_data->base = res->start;
	alg_data->ioaddr = ioremap(res->start, I2C_PNX_REGION_SIZE);
	if (!alg_data->ioaddr) {
		dev_err(&pdev->dev, "Couldn't ioremap I2C I/O region\n");
		ret = -ENOMEM;
		goto out_release;
	}

	ret = clk_enable(alg_data->clk);
	if (ret)
		goto out_unmap;

	freq = clk_get_rate(alg_data->clk);

	/*
	 * Clock Divisor High: this value is the number of system clocks
	 * the serial clock (SCL) will be high.
	 * For example, if the system clock period is 50 ns and the maximum
	 * desired serial period is 10000 ns (100 kHz), then CLKHI would be
	 * set to 0.5*(f_sys/f_i2c)-2=0.5*(20e6/100e3)-2=98. The actual value
	 * programmed into CLKHI will vary from this slightly due to
	 * variations in the output pad's rise and fall times as well as
	 * the deglitching filter length.
	 */

	tmp = (freq / speed) / 2 - 2;
	if (tmp > 0x3FF)
		tmp = 0x3FF;
	iowrite32(tmp, I2C_REG_CKH(alg_data));
	iowrite32(tmp, I2C_REG_CKL(alg_data));
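	/*
	 * Worked check of the comment above with illustrative numbers:
	 * for f_sys = 20 MHz and f_i2c = 100 kHz,
	 * tmp = (20000000 / 100000) / 2 - 2 = 200 / 2 - 2 = 98,
	 * which matches the CLKHI example (the 0x3FF clamp only matters
	 * for very low target speeds).
	 */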

	iowrite32(mcntrl_reset, I2C_REG_CTL(alg_data));
	if (wait_reset(alg_data)) {
		ret = -ENODEV;
		goto out_clock;
	}
	init_completion(&alg_data->mif.complete);

	alg_data->irq = platform_get_irq(pdev, 0);
	if (alg_data->irq < 0) {
		dev_err(&pdev->dev, "Failed to get IRQ from platform resource\n");
		ret = alg_data->irq;
		goto out_clock;
	}
	ret = request_irq(alg_data->irq, i2c_pnx_interrupt,
			0, pdev->name, alg_data);
	if (ret)
		goto out_clock;

	/* Register this adapter with the I2C subsystem */
	ret = i2c_add_numbered_adapter(&alg_data->adapter);
	if (ret < 0) {
		dev_err(&pdev->dev, "I2C: Failed to add bus\n");
		goto out_irq;
	}

	of_i2c_register_devices(&alg_data->adapter);

	dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
		alg_data->adapter.name, res->start, alg_data->irq);

	return 0;

out_irq:
	free_irq(alg_data->irq, alg_data);
out_clock:
	clk_disable(alg_data->clk);
out_unmap:
	iounmap(alg_data->ioaddr);
out_release:
	release_mem_region(res->start, I2C_PNX_REGION_SIZE);
out_clkget:
	clk_put(alg_data->clk);
out_drvdata:
	kfree(alg_data);
err_kzalloc:
	platform_set_drvdata(pdev, NULL);
	return ret;
}
Example #27
static int __devinit p54spi_probe(struct spi_device *spi)
{
	struct p54s_priv *priv = NULL;
	struct ieee80211_hw *hw;
	int ret = -EINVAL;

	hw = p54_init_common(sizeof(*priv));
	if (!hw) {
		dev_err(&spi->dev, "could not alloc ieee80211_hw");
		return -ENOMEM;
	}

	priv = hw->priv;
	priv->hw = hw;
	dev_set_drvdata(&spi->dev, priv);
	priv->spi = spi;

	spi->bits_per_word = 16;
	spi->max_speed_hz = 24000000;

	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(&priv->spi->dev, "spi_setup failed");
		goto err_free_common;
	}

	ret = gpio_request(p54spi_gpio_power, "p54spi power");
	if (ret < 0) {
		dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret);
		goto err_free_common;
	}

	ret = gpio_request(p54spi_gpio_irq, "p54spi irq");
	if (ret < 0) {
		dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret);
		goto err_free_common;
	}

	gpio_direction_output(p54spi_gpio_power, 0);
	gpio_direction_input(p54spi_gpio_irq);

	ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
			  p54spi_interrupt, IRQF_DISABLED, "p54spi",
			  priv->spi);
	if (ret < 0) {
		dev_err(&priv->spi->dev, "request_irq() failed");
		goto err_free_common;
	}

	set_irq_type(gpio_to_irq(p54spi_gpio_irq),
		     IRQ_TYPE_EDGE_RISING);

	disable_irq(gpio_to_irq(p54spi_gpio_irq));

	INIT_WORK(&priv->work, p54spi_work);
	init_completion(&priv->fw_comp);
	INIT_LIST_HEAD(&priv->tx_pending);
	mutex_init(&priv->mutex);
	SET_IEEE80211_DEV(hw, &spi->dev);
	priv->common.open = p54spi_op_start;
	priv->common.stop = p54spi_op_stop;
	priv->common.tx = p54spi_op_tx;

	ret = p54spi_request_firmware(hw);
	if (ret < 0)
		goto err_free_common;

	ret = p54spi_request_eeprom(hw);
	if (ret)
		goto err_free_common;

	ret = p54_register_common(hw, &priv->spi->dev);
	if (ret)
		goto err_free_common;

	return 0;

err_free_common:
	p54_free_common(priv->hw);
	return ret;
}
Example #28
static int msm_ispif_reset_hw(struct ispif_device *ispif, int release)
{
	int rc = 0, i;
	long timeout = 0;
	struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
	ispif->clk_idx = 0;

	rc = msm_ispif_get_clk_info(ispif, ispif->pdev,
		ispif_ahb_clk_info, ispif_clk_info);
	if (rc < 0) {
		pr_err("%s: msm_isp_get_clk_info() failed", __func__);
		return -EFAULT;
	}

	/* Turn ON regulators before enabling the clocks */
	rc = msm_ispif_set_regulator(ispif, 1);
	if (rc < 0) {
		pr_err("%s: ispif enable regulator failed", __func__);
		return -EFAULT;
	}

	rc = msm_cam_clk_enable(&ispif->pdev->dev,
		ispif_clk_info, ispif->clk,
		ispif->num_clk, 1);
	if (rc < 0) {
		pr_err("%s: cannot enable clock, error = %d\n",
			__func__, rc);
		rc = msm_cam_clk_enable(&ispif->pdev->dev,
			ispif_8626_reset_clk_info, reset_clk1,
			ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
		if (rc < 0) {
			pr_err("%s: cannot enable clock, error = %d",
				__func__, rc);
		} else {
			/* This is set when device is 8x26 */
			ispif->clk_idx = 2;
		}
	} else {
		/* This is set when device is 8974 */
		ispif->clk_idx = 1;
	}

	if (release) {
		for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
			msm_camera_io_w_mb(ISPIF_STOP_INTF_IMMEDIATELY,
				ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
			msm_camera_io_w_mb(ISPIF_STOP_INTF_IMMEDIATELY,
				ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
		}
		msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD,
			ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
	}
	init_completion(&ispif->reset_complete[VFE0]);
	if (ispif->hw_num_isps > 1)
		init_completion(&ispif->reset_complete[VFE1]);

	/* initiate reset of ISPIF */
	msm_camera_io_w(ISPIF_RST_CMD_MASK,
				ispif->base + ISPIF_RST_CMD_ADDR);

	timeout = wait_for_completion_timeout(
			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
	CDBG("%s: VFE0 done\n", __func__);

	if (timeout <= 0) {
		pr_err("%s: VFE0 reset wait timeout\n", __func__);
		rc = msm_cam_clk_enable(&ispif->pdev->dev,
			ispif_clk_info, ispif->clk,
			ispif->num_clk, 0);
		if (rc < 0) {
			rc = msm_cam_clk_enable(&ispif->pdev->dev,
				ispif_8626_reset_clk_info, reset_clk1,
				ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
			if (rc < 0)
				pr_err("%s: VFE0 reset wait timeout\n",
					__func__);
		}
		/* Turn OFF regulators */
		rc = msm_ispif_set_regulator(ispif, 0);
		return -ETIMEDOUT;
	}

	if (ispif->hw_num_isps > 1) {
		msm_camera_io_w(ISPIF_RST_CMD_1_MASK,
					ispif->base + ISPIF_RST_CMD_1_ADDR);
		timeout = wait_for_completion_timeout(
				&ispif->reset_complete[VFE1],
				msecs_to_jiffies(500));
		CDBG("%s: VFE1 done\n", __func__);
		if (timeout <= 0) {
			pr_err("%s: VFE1 reset wait timeout\n", __func__);
			rc = msm_cam_clk_enable(&ispif->pdev->dev,
				ispif_clk_info, ispif->clk,
				ispif->num_clk, 0);
			/* Turn OFF regulators */
			rc = msm_ispif_set_regulator(ispif, 0);
			return -ETIMEDOUT;
		}
	}

	if (ispif->clk_idx == 1) {
		rc = msm_cam_clk_enable(&ispif->pdev->dev,
			ispif_clk_info, ispif->clk,
			ispif->num_clk, 0);
		if (rc < 0) {
			pr_err("%s: cannot disable clock, error = %d",
				__func__, rc);
		}
	}

	if (ispif->clk_idx == 2) {
		rc = msm_cam_clk_enable(&ispif->pdev->dev,
			ispif_8626_reset_clk_info, reset_clk1,
			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
		if (rc < 0) {
			pr_err("%s: cannot disable clock, error = %d",
				__func__, rc);
		}
	}

	/* Turn OFF regulators after disabling the clocks */
	rc = msm_ispif_set_regulator(ispif, 0);
	if (rc < 0) {
		pr_err("%s: ispif disable regulator failed", __func__);
		return -EFAULT;
	}

	return rc;
}
Example #29
static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi;
	struct mdp3_session_data *mdp3_session;
	u32 offset;
	int bpp;
	struct mdss_panel_info *panel_info;
	int rc;

	pr_debug("mdp3_ctrl_pan_display\n");
	if (!mfd || !mfd->mdp.private1)
		return;

	panel_info = mfd->panel_info;
	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
	if (!mdp3_session || !mdp3_session->dma)
		return;

	if (mdp3_session->in_splash_screen) {
		pr_debug("continuous splash screen, IOMMU not attached\n");
		rc = mdp3_ctrl_reset(mfd);
		if (rc) {
			pr_err("fail to reset display\n");
			return;
		}
	}

	mutex_lock(&mdp3_session->lock);

	if (!mdp3_session->status) {
		pr_err("mdp3_ctrl_pan_display, display off!\n");
		goto pan_error;
	}

	fbi = mfd->fbi;

	bpp = fbi->var.bits_per_pixel / 8;
	offset = fbi->var.xoffset * bpp +
		 fbi->var.yoffset * fbi->fix.line_length;

	if (offset > fbi->fix.smem_len) {
		pr_err("invalid fb offset=%u total length=%u\n",
			offset, fbi->fix.smem_len);
		goto pan_error;
	}

	if (mfd->fbi->screen_base) {
		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN);
		mdp3_ctrl_clk_enable(mfd, 1);
		rc = mdp3_session->dma->update(mdp3_session->dma,
				(void *)(mfd->iova + offset),
				mdp3_session->intf);
		/* This is for the previous frame */
		if (rc < 0) {
			mdp3_ctrl_notify(mdp3_session,
				MDP_NOTIFY_FRAME_TIMEOUT);
		} else {
			if (mdp3_ctrl_get_intf_type(mfd) ==
						MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
				mdp3_ctrl_notify(mdp3_session,
					MDP_NOTIFY_FRAME_DONE);
			}
		}
		mdp3_session->dma_active = 1;
		init_completion(&mdp3_session->dma_completion);
		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
	} else {
		pr_debug("mdp3_ctrl_pan_display no memory, stop interface\n");
		mdp3_clk_enable(1, 0);
		mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
		mdp3_clk_enable(0, 0);
	}

	if (mdp3_session->first_commit) {
		/* wait for one frame time to ensure the frame is sent to the panel */
		msleep(1000 / panel_info->mipi.frame_rate);
		mdp3_session->first_commit = false;
	}

	mdp3_session->vsync_before_commit = 0;

pan_error:
	mutex_unlock(&mdp3_session->lock);
}
static int bcm2835_audio_set_ctls_chan(bcm2835_alsa_stream_t * alsa_stream,
				       bcm2835_chip_t * chip)
{
	VC_AUDIO_MSG_T m;
	AUDIO_INSTANCE_T *instance = alsa_stream->instance;
	int32_t success;
	int ret;
	LOG_DBG(" .. IN\n");

	LOG_INFO
	    (" Setting ALSA dest(%d), volume(%d)\n", chip->dest, chip->volume);

	if (mutex_lock_interruptible(&instance->vchi_mutex)) {
		LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
			instance->num_connections);
		return -EINTR;
	}
	vchi_service_use(instance->vchi_handle[0]);

	instance->result = -1;

	m.type = VC_AUDIO_MSG_TYPE_CONTROL;
	m.u.control.dest = chip->dest;
	m.u.control.volume = chip->volume;

	/* Create the message available completion */
	init_completion(&instance->msg_avail_comp);

	/* Send the message to the videocore */
	success = vchi_msg_queue(instance->vchi_handle[0],
				 &m, sizeof m,
				 VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);

	if (success != 0) {
		LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
			__func__, success);

		ret = -1;
		goto unlock;
	}

	/* We are expecting a reply from the videocore */
	ret = wait_for_completion_interruptible(&instance->msg_avail_comp);
	if (ret) {
		LOG_ERR("%s: failed on waiting for event (status=%d)\n",
			__func__, success);
		goto unlock;
	}

	if (instance->result != 0) {
		LOG_ERR("%s: result=%d\n", __func__, instance->result);

		ret = -1;
		goto unlock;
	}

	ret = 0;

unlock:
	vchi_service_release(instance->vchi_handle[0]);
	mutex_unlock(&instance->vchi_mutex);

	LOG_DBG(" .. OUT\n");
	return ret;
}
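The wait above is released by the driver's VCHI service callback, which is not
shown in this listing. A sketch of what that reply side has to do (field names
follow VC_AUDIO_MSG_T as used above; the exact callback body is an assumption):

	/* Sketch: parse the reply, publish the result, release the waiter. */
	static void sketch_audio_reply(AUDIO_INSTANCE_T *instance,
				       const VC_AUDIO_MSG_T *m)
	{
		instance->result = m->u.result.success;	/* assumed reply field */
		complete(&instance->msg_avail_comp);	/* pairs with the wait above */
	}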