Code example #1
/*
 * The runtime_suspend/resume callbacks are very similar to the legacy
 * suspend/resume ones, with one notable exception: the PCI core takes care
 * of taking the system through D3hot and restoring it back to D0, so there
 * is no need to duplicate that here.
 */
static int intel_sst_runtime_suspend(struct device *dev)
{
    union config_status_reg csr;

    pr_debug("runtime_suspend called\n");
    if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
        pr_err("System already in Suspended state\n");
        return 0;
    }
    /*save fw context*/
    sst_save_dsp_context();
    /*Assert RESET on LPE Processor*/
    csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
    sst_drv_ctx->csr_value = csr.full;
    csr.full = csr.full | 0x2;

    /* Move the SST state to Suspended */
    mutex_lock(&sst_drv_ctx->sst_lock);
    sst_drv_ctx->sst_state = SST_SUSPENDED;
    sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
    mutex_unlock(&sst_drv_ctx->sst_lock);
    if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
        vibra_pwm_configure(false);

    flush_workqueue(sst_drv_ctx->post_msg_wq);
    flush_workqueue(sst_drv_ctx->process_msg_wq);
    flush_workqueue(sst_drv_ctx->process_reply_wq);
    return 0;
}
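As the comment at the top of example #1 notes, the PCI core handles the D3hot/D0 transitions; the callbacks above only run if they are wired into the driver's dev_pm_ops. The following is a minimal sketch of that hookup, not code from the driver source: the names intel_sst_pm, intel_sst_driver and intel_sst_ids are assumptions (intel_sst_probe and SST_DRV_NAME appear in example #6 below; an illustrative intel_sst_ids table is sketched after example #6).

/*
 * Sketch only: how the runtime callbacks above are typically registered.
 * intel_sst_pm, intel_sst_driver and intel_sst_ids are assumed names.
 */
static const struct dev_pm_ops intel_sst_pm = {
    .runtime_suspend = intel_sst_runtime_suspend,
    .runtime_resume = intel_sst_runtime_resume,
};

static struct pci_driver intel_sst_driver = {
    .name = SST_DRV_NAME,
    .id_table = intel_sst_ids,      /* assumed PCI ID table, sketched after example #6 */
    .probe = intel_sst_probe,       /* see example #6 */
    .driver = {
        .pm = &intel_sst_pm,
    },
};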
Code example #2
static int intel_vibra_runtime_suspend(struct device *dev)
{
	struct vibra_info *info = dev_get_drvdata(dev);

	pr_debug("In %s\n", __func__);
	vibra_pwm_configure(info, false);
	return 0;
}
Code example #3
static int intel_vibra_runtime_resume(struct device *dev)
{
	struct vibra_info *info = dev_get_drvdata(dev);

	pr_debug("In %s\n", __func__);
	lnw_gpio_set_alt(INTEL_PWM_ENABLE_GPIO, LNW_ALT_2);
	vibra_pwm_configure(info, true);
	return 0;
}
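These callbacks only fire when the device's runtime-PM usage count rises from or drops to zero. The sketch below is an illustration rather than driver code (the helper name vibra_set_state is hypothetical; pm_runtime_get_sync()/pm_runtime_put() come from <linux/pm_runtime.h>): it shows how a vibration on/off path could bracket hardware access so that intel_vibra_runtime_resume() and intel_vibra_runtime_suspend() above get invoked. info->dev is set up in the probe shown in example #5.

/* Illustration only: vibra_set_state is a hypothetical helper. */
static void vibra_set_state(struct vibra_info *info, bool on)
{
	if (on) {
		/* usage count 0 -> 1: intel_vibra_runtime_resume() runs first */
		pm_runtime_get_sync(info->dev);
		/* ... start vibration ... */
	} else {
		/* ... stop vibration ... */
		/* usage count 1 -> 0: device becomes eligible for runtime suspend */
		pm_runtime_put(info->dev);
	}
}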
Code example #4
static int intel_sst_runtime_resume(struct device *dev)
{
    u32 csr;

    pr_debug("runtime_resume called\n");
    if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
        pr_err("SST is not in suspended state\n");
        return 0;
    }
    csr = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
    /*
     * To restore the csr_value after S0ix and S3 states.
     * The value 0x30000 is to enable LPE dram high and low addresses.
     * Reference:
     * Penwell Audio Voice Module HAS 1.61 Section - 13.12.1 -
     * CSR - Configuration and Status Register.
     */
    csr |= (sst_drv_ctx->csr_value | 0x30000);
    sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr);

    /* GPIO_PIN 12, 13, 74, 75 need to be configured in
     * ALT_FUNC_2 mode for SSP3 IOs
     */
    if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
        lnw_gpio_set_alt(CLV_I2S_3_CLK_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_I2S_3_FS_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_I2S_3_TXD_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_I2S_3_RXD_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_VIBRA_PWM_GPIO_PIN, LNW_ALT_2);

        vibra_pwm_configure(true);
    }

    sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
    return 0;
}
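sst_set_fw_state_locked(), called at the end of both the resume and probe paths, is not shown in these excerpts. Judging from the sst_lock/sst_state handling in example #1, a plausible implementation is the sketch below; the context type name intel_sst_drv is an assumption, not confirmed by the excerpts.

/* Sketch only; the type name intel_sst_drv is assumed. */
static void sst_set_fw_state_locked(struct intel_sst_drv *sst_drv_ctx, int sst_state)
{
    mutex_lock(&sst_drv_ctx->sst_lock);
    sst_drv_ctx->sst_state = sst_state;
    mutex_unlock(&sst_drv_ctx->sst_lock);
}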
Code example #5
static int __devinit intel_mid_vibra_probe(struct pci_dev *pci,
			const struct pci_device_id *pci_id)
{
	struct vibra_info *info;
	int ret = 0;

	pr_debug("Probe for DID %x\n", pci->device);

	info = devm_kzalloc(&pci->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	ret = gpio_request_one(INTEL_VIBRA_ENABLE_GPIO, GPIOF_DIR_OUT,
				 "VIBRA ENABLE");
	if (ret != 0) {
		pr_err("gpio_request(%d) fails:%d\n",
			INTEL_VIBRA_ENABLE_GPIO, ret);
		goto out;
	}

	ret = gpio_request_one(INTEL_PWM_ENABLE_GPIO, GPIOF_DIR_OUT,
				  "PWM ENABLE");

	if (ret != 0) {
		pr_err("gpio_request(%d) fails:%d\n",
			INTEL_PWM_ENABLE_GPIO, ret);
		goto do_freegpio_vibra_enable;
	}

	/* Init the device */
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("device can't be enabled\n");
		goto do_freegpio_pwm;
	}
	ret = pci_request_regions(pci, INTEL_VIBRA_DRV_NAME);

	if (ret)
		goto do_disable_device;
	info->pci = pci_dev_get(pci);

	/* vibra Shim */
	info->shim = pci_ioremap_bar(pci, 0);
	if (!info->shim) {
		pr_err("ioremap failed for vibra driver\n");
		ret = -ENOMEM;
		goto do_release_regions;
	}

	/*set default value to Max */
	info->pwm.part.pwmbu = INTEL_VIBRA_MAX_BASEUNIT;
	info->pwm.part.pwmtd = INTEL_VIBRA_MAX_TIMEDIVISOR;

	info->dev = &pci->dev;
	info->name = "intel_mid:vibrator";
	mutex_init(&info->lock);

	ret = vibra_register_sysfs(info);
	if (ret < 0) {
		pr_err("could not register sysfs files\n");
		goto do_unmap_shim;
	}
	lnw_gpio_set_alt(INTEL_PWM_ENABLE_GPIO, LNW_ALT_2);
	vibra_pwm_configure(info, true);

	pci_set_drvdata(pci, info);
	pm_runtime_allow(&pci->dev);
	pm_runtime_put_noidle(&pci->dev);
	return ret;

do_unmap_shim:
	iounmap(info->shim);
do_release_regions:
	pci_release_regions(pci);
do_disable_device:
	pci_disable_device(pci);
do_freegpio_pwm:
	gpio_free(INTEL_PWM_ENABLE_GPIO);
do_freegpio_vibra_enable:
	gpio_free(INTEL_VIBRA_ENABLE_GPIO);
out:
	return ret;
}
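The matching remove path is not part of these excerpts. A sketch of what it would have to do, reversing the probe steps in the opposite order, follows; vibra_unregister_sysfs() is a hypothetical counterpart to vibra_register_sysfs() above.

/* Sketch of the remove path; vibra_unregister_sysfs() is hypothetical. */
static void __devexit intel_mid_vibra_remove(struct pci_dev *pci)
{
	struct vibra_info *info = pci_get_drvdata(pci);

	/* rebalance the pm_runtime_allow()/pm_runtime_put_noidle() done in probe */
	pm_runtime_get_noresume(&pci->dev);
	pm_runtime_forbid(&pci->dev);
	vibra_unregister_sysfs(info);		/* hypothetical helper */
	iounmap(info->shim);
	pci_release_regions(pci);
	pci_disable_device(pci);
	pci_dev_put(pci);
	gpio_free(INTEL_PWM_ENABLE_GPIO);
	gpio_free(INTEL_VIBRA_ENABLE_GPIO);
	pci_set_drvdata(pci, NULL);
}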
Code example #6
/**
 * intel_sst_probe - PCI probe function
 * @pci:	PCI device structure
 * @pci_id:	PCI device ID structure
 *
 * This function is called by the OS when a matching device is found.
 * It enables the device, sets up interrupts, and so on.
 */
static int __devinit intel_sst_probe(struct pci_dev *pci,
                                     const struct pci_device_id *pci_id)
{
    int i, ret = 0;

    pr_debug("Probe for DID %x\n", pci->device);
    mutex_lock(&drv_ctx_lock);
    if (sst_drv_ctx) {
        pr_err("Only one sst handle is supported\n");
        mutex_unlock(&drv_ctx_lock);
        return -EBUSY;
    }

    sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
    if (!sst_drv_ctx) {
        pr_err("malloc fail\n");
        mutex_unlock(&drv_ctx_lock);
        return -ENOMEM;
    }
    mutex_unlock(&drv_ctx_lock);

    sst_drv_ctx->pci_id = pci->device;

    mutex_init(&sst_drv_ctx->stream_lock);
    mutex_init(&sst_drv_ctx->sst_lock);
    mutex_init(&sst_drv_ctx->mixer_ctrl_lock);

    sst_drv_ctx->stream_cnt = 0;
    sst_drv_ctx->encoded_cnt = 0;
    sst_drv_ctx->am_cnt = 0;
    sst_drv_ctx->pb_streams = 0;
    sst_drv_ctx->cp_streams = 0;
    sst_drv_ctx->unique_id = 0;
    sst_drv_ctx->pmic_port_instance = SST_DEFAULT_PMIC_PORT;
    sst_drv_ctx->fw = NULL;
    sst_drv_ctx->fw_in_mem = NULL;

    INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
    INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, sst_post_message);
    INIT_WORK(&sst_drv_ctx->ipc_process_msg.wq, sst_process_message);
    INIT_WORK(&sst_drv_ctx->ipc_process_reply.wq, sst_process_reply);
    init_waitqueue_head(&sst_drv_ctx->wait_queue);

    sst_drv_ctx->mad_wq = create_singlethread_workqueue("sst_mad_wq");
    if (!sst_drv_ctx->mad_wq) {
        ret = -ENOMEM;
        goto do_free_drv_ctx;
    }
    sst_drv_ctx->post_msg_wq = create_workqueue("sst_post_msg_wq");
    if (!sst_drv_ctx->post_msg_wq) {
        ret = -ENOMEM;
        goto free_mad_wq;
    }
    sst_drv_ctx->process_msg_wq = create_workqueue("sst_process_msg_wq");
    if (!sst_drv_ctx->process_msg_wq) {
        ret = -ENOMEM;
        goto free_post_msg_wq;
    }
    sst_drv_ctx->process_reply_wq = create_workqueue("sst_process_reply_wq");
    if (!sst_drv_ctx->process_reply_wq) {
        ret = -ENOMEM;
        goto free_process_msg_wq;
    }

    for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
        sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
        sst_drv_ctx->alloc_block[i].ops_block.condition = false;
    }
    spin_lock_init(&sst_drv_ctx->ipc_spin_lock);

    sst_drv_ctx->max_streams = pci_id->driver_data;
    pr_debug("Got drv data max stream %d\n",
             sst_drv_ctx->max_streams);
    for (i = 1; i <= sst_drv_ctx->max_streams; i++) {
        struct stream_info *stream = &sst_drv_ctx->streams[i];
        INIT_LIST_HEAD(&stream->bufs);
        mutex_init(&stream->lock);
        spin_lock_init(&stream->pcm_lock);
    }
    if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
        sst_drv_ctx->mmap_mem = NULL;
        sst_drv_ctx->mmap_len = SST_MMAP_PAGES * PAGE_SIZE;
        while (sst_drv_ctx->mmap_len > 0) {
            sst_drv_ctx->mmap_mem =
                kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
            if (sst_drv_ctx->mmap_mem) {
                pr_debug("Got memory %p size 0x%x\n",
                         sst_drv_ctx->mmap_mem,
                         sst_drv_ctx->mmap_len);
                break;
            }
            if (sst_drv_ctx->mmap_len < (SST_MMAP_STEP*PAGE_SIZE)) {
                pr_err("mem alloc fail...abort!!\n");
                ret = -ENOMEM;
                goto free_process_reply_wq;
            }
            sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
            pr_debug("mem alloc failed...trying %d\n",
                     sst_drv_ctx->mmap_len);
        }
    }
    if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
        sst_drv_ctx->device_input_mixer = SST_STREAM_DEVICE_IHF
                                          | SST_INPUT_STREAM_PCM;
    }

    /* Init the device */
    ret = pci_enable_device(pci);
    if (ret) {
        pr_err("device can't be enabled\n");
        goto do_free_mem;
    }
    sst_drv_ctx->pci = pci_dev_get(pci);
    ret = pci_request_regions(pci, SST_DRV_NAME);
    if (ret)
        goto do_disable_device;
    /* map registers */
    /* SST Shim */
    sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
    sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
    if (!sst_drv_ctx->shim) {
        ret = -ENOMEM;
        goto do_release_regions;
    }
    pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);

    /* Shared SRAM */
    sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
    if (!sst_drv_ctx->mailbox) {
        ret = -ENOMEM;
        goto do_unmap_shim;
    }
    pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);

    /* IRAM */
    sst_drv_ctx->iram_base = pci_resource_start(pci, 3);
    sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
    if (!sst_drv_ctx->iram) {
        ret = -ENOMEM;
        goto do_unmap_sram;
    }
    pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);

    /* DRAM */
    sst_drv_ctx->dram_base = pci_resource_start(pci, 4);
    sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
    if (!sst_drv_ctx->dram) {
        ret = -ENOMEM;
        goto do_unmap_iram;
    }
    pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);

    sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
    /* Register the ISR */
    ret = request_threaded_irq(pci->irq, intel_sst_interrupt,
                               intel_sst_irq_thread, IRQF_SHARED, SST_DRV_NAME,
                               sst_drv_ctx);
    if (ret)
        goto do_unmap_dram;
    pr_debug("Registered IRQ 0x%x\n", pci->irq);

    /*Register LPE Control as misc driver*/
    ret = misc_register(&lpe_ctrl);
    if (ret) {
        pr_err("couldn't register control device\n");
        goto do_free_irq;
    }

    if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
        ret = misc_register(&lpe_dev);
        if (ret) {
            pr_err("couldn't register LPE device\n");
            goto do_free_misc;
        }
    } else if ((sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) ||
               (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)) {
        u32 csr;
        u32 csr2;
        u32 clkctl;

        /*allocate mem for fw context save during suspend*/
        sst_drv_ctx->fw_cntx = kzalloc(FW_CONTEXT_MEM, GFP_KERNEL);
        if (!sst_drv_ctx->fw_cntx) {
            ret = -ENOMEM;
            goto do_free_misc;
        }
        /* a size of zero means there is no saved context to restore yet */
        sst_drv_ctx->fw_cntx_size = 0;

        /*set lpe start clock and ram size*/
        csr = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
        csr |= 0x30000;
        /*make sure clksel set to OSC for SSP0,1 (default)*/
        csr &= 0xFFFFFFF3;
        sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr);

        /*set clock output enable for SSP0,1,3*/
        clkctl = sst_shim_read(sst_drv_ctx->shim, SST_CLKCTL);
        if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
            clkctl |= (0x7 << 16);
        else
            clkctl |= ((1<<16)|(1<<17));
        sst_shim_write(sst_drv_ctx->shim, SST_CLKCTL, clkctl);

        /* set SSP0 & SSP1 disable DMA finish */
        csr2 = sst_shim_read(sst_drv_ctx->shim, SST_CSR2);
        /* set SSP3 disable DMA finish */
        csr2 |= BIT(1)|BIT(2);
        sst_shim_write(sst_drv_ctx->shim, SST_CSR2, csr2);
    }

    /* GPIO_PIN 12, 13, 74, 75 need to be configured in
     * ALT_FUNC_2 mode for SSP3 IOs
     */
    if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
        lnw_gpio_set_alt(CLV_I2S_3_CLK_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_I2S_3_FS_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_I2S_3_TXD_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_I2S_3_RXD_GPIO_PIN, LNW_ALT_2);
        lnw_gpio_set_alt(CLV_VIBRA_PWM_GPIO_PIN, LNW_ALT_2);

        vibra_pwm_configure(true);
    }

    sst_drv_ctx->lpe_stalled = 0;
    pci_set_drvdata(pci, sst_drv_ctx);
    pm_runtime_allow(&pci->dev);
    pm_runtime_put_noidle(&pci->dev);
    register_sst(&pci->dev);

    sst_drv_ctx->qos = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL);
    if (!sst_drv_ctx->qos) {
        ret = -ENOMEM;
        goto do_free_misc;
    }
    pm_qos_add_request(sst_drv_ctx->qos, PM_QOS_CPU_DMA_LATENCY,
                       PM_QOS_DEFAULT_VALUE);

    pr_info("%s successfully done!\n", __func__);
    return ret;

do_free_misc:
    misc_deregister(&lpe_ctrl);
do_free_irq:
    free_irq(pci->irq, sst_drv_ctx);
do_unmap_dram:
    iounmap(sst_drv_ctx->dram);
do_unmap_iram:
    iounmap(sst_drv_ctx->iram);
do_unmap_sram:
    iounmap(sst_drv_ctx->mailbox);
do_unmap_shim:
    iounmap(sst_drv_ctx->shim);
do_release_regions:
    pci_release_regions(pci);
do_disable_device:
    pci_disable_device(pci);
do_free_mem:
    kfree(sst_drv_ctx->mmap_mem);
free_process_reply_wq:
    destroy_workqueue(sst_drv_ctx->process_reply_wq);
free_process_msg_wq:
    destroy_workqueue(sst_drv_ctx->process_msg_wq);
free_post_msg_wq:
    destroy_workqueue(sst_drv_ctx->post_msg_wq);
free_mad_wq:
    destroy_workqueue(sst_drv_ctx->mad_wq);
do_free_drv_ctx:
    kfree(sst_drv_ctx);
    sst_drv_ctx = NULL;
    pr_err("Probe failed with %d\n", ret);
    return ret;
}
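The probe above reads pci_id->driver_data as max_streams. An illustrative PCI ID table that would supply it, and that the pci_driver sketch after example #1 refers to as intel_sst_ids, could look like the following; the MAX_NUM_STREAMS_* values are placeholders, not figures from the source.

/* Illustration only; the MAX_NUM_STREAMS_* constants are placeholders. */
static const struct pci_device_id intel_sst_ids[] = {
    { PCI_VDEVICE(INTEL, SST_MRST_PCI_ID), MAX_NUM_STREAMS_MRST },
    { PCI_VDEVICE(INTEL, SST_MFLD_PCI_ID), MAX_NUM_STREAMS_MFLD },
    { PCI_VDEVICE(INTEL, SST_CLV_PCI_ID),  MAX_NUM_STREAMS_CLV },
    { 0, },
};
MODULE_DEVICE_TABLE(pci, intel_sst_ids);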