Example #1
int hmm_init(void)
{
	int ret;

	if (IS_MRFLD)
		ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
					 ISP_VM_START, ISP_VM_SIZE);
	else
		ret = hmm_bo_device_init(&bo_device, &sh_mmu_mfld,
					 ISP_VM_START, ISP_VM_SIZE);

	if (ret)
		v4l2_err(&atomisp_dev,
			    "hmm_bo_device_init failed.\n");

	ret = hmm_pool_register((unsigned int)dypool_enable,
						HMM_POOL_TYPE_DYNAMIC);
	if (ret)
		v4l2_err(&atomisp_dev,
			    "Failed to register dynamic memory pool.\n");

	/*
	 * hmm uses NULL (0) to indicate an invalid ISP virtual address,
	 * and ISP_VM_START is also defined as 0, so allocate one piece
	 * of dummy memory first; it takes address 0, which prevents any
	 * later hmm_alloc() call from returning 0.
	 */
	dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, 0, HMM_UNCACHED);
	return ret;
}
Example #2
static int atomisp_pci_probe(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	const struct atomisp_platform_data *pdata;
	struct atomisp_device *isp;
	unsigned int start;
	void __iomem *base;
	int err;

	if (!dev) {
		dev_err(&dev->dev, "atomisp: error device ptr\n");
		return -EINVAL;
	}

	atomisp_pci_vendor = id->vendor;
	atomisp_pci_device = id->device;

	pdata = atomisp_get_platform_data();
	if (pdata == NULL) {
		dev_err(&dev->dev, "no platform data available\n");
		return -ENODEV;
	}

	err = pcim_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n",
			err);
		return err;
	}

	start = pci_resource_start(dev, ATOM_ISP_PCI_BAR);
	v4l2_dbg(1, dbg_level, &atomisp_dev, "start: 0x%x\n", start);

	err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev));
	if (err) {
		dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n",
			err);
		return err;
	}

	base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR];
	v4l2_dbg(1, dbg_level, &atomisp_dev, "base: %p\n", base);

	atomisp_io_base = base;

	v4l2_dbg(1, dbg_level, &atomisp_dev, "atomisp_io_base: %p\n",
			atomisp_io_base);

	isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL);
	if (!isp) {
		dev_err(&dev->dev, "Failed to alloc CI ISP structure\n");
		return -ENOMEM;
	}
	isp->pdev = dev;
	isp->dev = &dev->dev;
	isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
	isp->pci_root = pci_get_bus_and_slot(0, 0);
	if (!isp->pci_root) {
		dev_err(&dev->dev, "Unable to find PCI host\n");
		return -ENODEV;
	}
	isp->saved_regs.ispmmadr = start;

	mutex_init(&isp->mutex);
	mutex_init(&isp->streamoff_mutex);
	spin_lock_init(&isp->lock);
	init_completion(&isp->init_done);

	isp->media_dev.driver_version = ATOMISP_CSS_VERSION_20;

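	/* Derive the ISP generation and stepping for media_dev.hw_revision. */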
	switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
	case ATOMISP_PCI_DEVICE_SOC_MRFLD:
	case ATOMISP_PCI_DEVICE_SOC_BYT:
		isp->media_dev.hw_revision =
			(ATOMISP_HW_REVISION_ISP2400
			 << ATOMISP_HW_REVISION_SHIFT) |
#ifdef CONFIG_ISP2400
			ATOMISP_HW_STEPPING_A0;
#else
			ATOMISP_HW_STEPPING_B0;
#endif
		break;
	default:
		/* Medfield and Clovertrail. */
		isp->media_dev.hw_revision =
			(ATOMISP_HW_REVISION_ISP2300
			 << ATOMISP_HW_REVISION_SHIFT) |
			(dev->revision < 0x09 ?
			 ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0);
	}

	isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;
	if ((pdata->spid->platform_family_id == INTEL_CLVTP_PHONE ||
	     pdata->spid->platform_family_id == INTEL_CLVT_TABLET) &&
	    isp->pdev->revision < 0x09) {
		/* Workaround for Cloverview(+) older than stepping B0 */
		isp->max_isr_latency = CSTATE_EXIT_LATENCY_C1;
	}

	/* Load isp firmware from user space */
	isp->firmware = load_firmware(&dev->dev);
	if (!isp->firmware) {
		err = -ENOENT;
		dev_err(&dev->dev, "Load firmwares failed\n");
		goto load_fw_fail;
	}

	isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
	if (isp->wdt_work_queue == NULL) {
		dev_err(&dev->dev, "Failed to initialize wdt work queue\n");
		err = -ENOMEM;
		goto wdt_work_queue_fail;
	}
	INIT_WORK(&isp->wdt_work, atomisp_wdt_work);

	isp->delayed_init_workq =
		alloc_workqueue(isp->v4l2_dev.name, WQ_CPU_INTENSIVE, 1);
	if (isp->delayed_init_workq == NULL) {
		dev_err(&dev->dev, "Failed to initialize delayed init workq\n");
		err = -ENOMEM;
		goto delayed_init_work_queue_fail;
	}
	INIT_WORK(&isp->delayed_init_work, atomisp_delayed_init_work);

	pci_set_master(dev);
	pci_set_drvdata(dev, isp);

	err = pci_enable_msi(dev);
	if (err) {
		dev_err(&dev->dev, "Failed to enable msi (%d)\n", err);
		goto enable_msi_fail;
	}

	err = devm_request_threaded_irq(&dev->dev, dev->irq,
					atomisp_isr, atomisp_isr_thread,
					IRQF_SHARED, "isp_irq", isp);
	if (err) {
		dev_err(&dev->dev, "Failed to request irq (%d)\n", err);
		goto enable_msi_fail;
	}

	setup_timer(&isp->wdt, atomisp_wdt, (unsigned long)isp);

	atomisp_msi_irq_init(isp, dev);

	pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	if (IS_ISP2400) {
		u32 reg32;
		/*
		 * For MRFLD, software/firmware needs to write 1 to bit 0 of
		 * the CSI_RECEIVER_SELECTION_REG register to enable the SH
		 * CSI backend. Writing 0 would enable the Arasan CSI backend,
		 * which has bugs (sightings 4567697 and 4567699) and will be
		 * removed in B0.
		 */
		atomisp_css2_hw_store_32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
		pci_read_config_dword(dev, PCI_I_CONTROL, &reg32);
		reg32 |= MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING
			| MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
		pci_write_config_dword(dev, PCI_I_CONTROL, reg32);
	}

	err = atomisp_initialize_modules(isp);
	if (err < 0) {
		dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err);
		goto enable_msi_fail;
	}

	err = atomisp_register_entities(isp);
	if (err < 0) {
		dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n",
			err);
		goto enable_msi_fail;
	}
	atomisp_acc_init(isp);

	/* save the iunit context only once after all the values are init'ed. */
	atomisp_save_iunit_reg(isp);

	pm_runtime_put_noidle(&dev->dev);
	pm_runtime_allow(&dev->dev);

	err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
	if (err)
		dev_err(&dev->dev, "Failed to register reserved memory pool.\n");

	return 0;

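/* Error unwind: undo the completed setup steps in reverse order. */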
enable_msi_fail:
	destroy_workqueue(isp->delayed_init_workq);
delayed_init_work_queue_fail:
	destroy_workqueue(isp->wdt_work_queue);
wdt_work_queue_fail:
	release_firmware(isp->firmware);
load_fw_fail:
	pci_dev_put(isp->pci_root);
	return err;
}
Example #3
static int __devinit atomisp_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *id)
{
	struct atomisp_device *isp = NULL;
	unsigned int start, len;
	void __iomem *base = NULL;
	int err = 0;

	if (!dev) {
		v4l2_err(&atomisp_dev, "atomisp: erorr device ptr\n");
		return -EINVAL;
	}

	atomisp_pci_vendor = id->vendor;
	atomisp_pci_device = id->device;

	err = pci_enable_device(dev);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to enable CI ISP device\n");
		return err;
	}

	start = pci_resource_start(dev, 0);
	len = pci_resource_len(dev, 0);

	err = pci_request_region(dev, 0, atomisp_pci_driver.name);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to request region 0x%1x-0x%Lx\n",
			    start, (unsigned long long)pci_resource_end(dev,
				0));
		goto request_region_fail;
	}

	base = ioremap_nocache(start, len);
	if (!base) {
		v4l2_err(&atomisp_dev,
			    "Failed to I/O memory remapping\n");
		err = -ENOMEM;
		goto ioremap_fail;
	}

	isp = kzalloc(sizeof(struct atomisp_device), GFP_KERNEL);
	if (!isp) {
		v4l2_err(&atomisp_dev, "Failed to alloc CI ISP structure\n");
		err = -ENOMEM;
		goto kzalloc_fail;
	}
	isp->sw_contex.probed = false;
	isp->sw_contex.init = false;
	isp->pdev = dev;
	isp->dev = &dev->dev;
	isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
	isp->hw_contex.pci_root = pci_get_bus_and_slot(0, 0);

	/* Load isp firmware from user space */
	/*
	 * FIXME: the MRFLD VP does not load firmware
	 * from the file system.
	 */
	if (!IS_MRFLD) {
		isp->firmware = load_firmware(&dev->dev);
		if (!isp->firmware) {
			v4l2_err(&atomisp_dev, "Load firmware failed\n");
			err = -ENOENT;
			goto load_fw_fail;
		}
	}

	err = atomisp_initialize_modules(isp);
	if (err < 0) {
		v4l2_err(&atomisp_dev, "atomisp_initialize_modules\n");
		goto init_mod_fail;
	}

	err = atomisp_register_entities(isp);
	if (err < 0) {
		v4l2_err(&atomisp_dev, "atomisp_register_entities failed\n");
		goto init_mod_fail;
	}

	init_completion(&isp->wq_frame_complete);
	init_completion(&isp->dis_state_complete);
	spin_lock_init(&isp->irq_lock);

	isp->work_queue = create_singlethread_workqueue(isp->v4l2_dev.name);
	if (isp->work_queue == NULL) {
		v4l2_err(&atomisp_dev, "Failed to initialize work queue\n");
		err = -ENOMEM;
		goto work_queue_fail;
	}
	INIT_WORK(&isp->work, atomisp_work);

	isp->hw_contex.ispmmadr = start;

	pci_set_master(dev);
	atomisp_io_base = base;

	isp->tvnorm = tvnorms;
	mutex_init(&isp->input_lock);
	/* isp_lock is to protect race access of css functions */
	mutex_init(&isp->isp_lock);
	isp->sw_contex.updating_uptr = false;
	isp->isp3a_stat_ready = false;

	pci_set_drvdata(dev, isp);

	err = pci_enable_msi(dev);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to enable msi\n");
		goto enable_msi_fail;
	}
	err = request_irq(dev->irq, atomisp_isr,
			  IRQF_SHARED, "isp_irq", isp);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to request irq\n");
		goto request_irq_fail;
	}

	setup_timer(&isp->wdt, atomisp_wdt_wakeup_dog, (unsigned long)isp);

	atomisp_msi_irq_init(isp, dev);

	pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	/*
	 * FIXME: the MRFLD VP does not implement
	 * the PM core.
	 */
#ifdef CONFIG_PM
	if (!IS_MRFLD) {
		pm_runtime_put_noidle(&dev->dev);
		pm_runtime_allow(&dev->dev);
	}
#endif
	isp->sw_contex.probed = true;

	err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
	if (err)
		v4l2_err(&atomisp_dev,
			    "Failed to register reserved memory pool.\n");

	return 0;

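/* Error unwind: release resources in the reverse order they were acquired. */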
request_irq_fail:
	pci_disable_msi(dev);
enable_msi_fail:
	pci_set_drvdata(dev, NULL);
	destroy_workqueue(isp->work_queue);
work_queue_fail:
	atomisp_unregister_entities(isp);
init_mod_fail:
	release_firmware(isp->firmware);
load_fw_fail:
	kfree(isp);
kzalloc_fail:
	iounmap(base);
ioremap_fail:
	pci_release_region(dev, 0);
request_region_fail:
	pci_disable_device(dev);
	return err;
}