/*
 * rtc6213n_set_chan - set the channel
 */
static int rtc6213n_set_chan(struct rtc6213n_device *radio, unsigned short chan)
{
	int retval;
	unsigned long timeout;
	bool timed_out = false;
	unsigned short current_chan =
		radio->registers[CHANNEL] & CHANNEL_CSR0_CH;

	dev_info(&radio->videodev->dev, "======== rtc6213n_set_chan ========\n");
	dev_info(&radio->videodev->dev, "RTC6213n tuning process is starting\n");
	dev_info(&radio->videodev->dev, "CHAN=0x%4.4hx SKCFG1=0x%4.4hx STATUS=0x%4.4hx chan=0x%4.4hx\n",
			radio->registers[CHANNEL], radio->registers[SEEKCFG1],
			radio->registers[STATUS], chan);

	/* start tuning */
	radio->registers[CHANNEL] &= ~CHANNEL_CSR0_CH;
	radio->registers[CHANNEL] |= CHANNEL_CSR0_TUNE | chan;
	retval = rtc6213n_set_register(radio, CHANNEL);
	if (retval < 0) {
		radio->registers[CHANNEL] = current_chan;
		goto done;
	}

	/* currently the I2C driver only uses the interrupt path to tune */
	if (radio->stci_enabled) {
		INIT_COMPLETION(radio->completion);

		/* wait till tune operation has completed */
		retval = wait_for_completion_timeout(&radio->completion,
			msecs_to_jiffies(tune_timeout));
		if (!retval) {
			dev_info(&radio->videodev->dev, "rtc6213n_set_chan : timed_out\n");
			timeout = true;
		}
		retval = rtc6213n_get_register(radio, STATUS);
		if (retval < 0)
			goto stop;
	} else {
		/* wait till tune operation has completed */
		timeout = jiffies + msecs_to_jiffies(tune_timeout);
		do {
			retval = rtc6213n_get_all_registers(radio);
			if (retval < 0)
				goto stop;
			timed_out = time_after(jiffies, timeout);
		} while (((radio->registers[STATUS] & STATUS_STD) == 0)
			&& (!timed_out));
	}
	dev_info(&radio->videodev->dev, "RTC6213n tuning process is done\n");
	dev_info(&radio->videodev->dev, "CHAN=0x%4.4hx SKCFG1=0x%4.4hx STATUS=0x%4.4hx, STD = %d, SF = %d, RSSI = %d\n",
		radio->registers[CHANNEL], radio->registers[SEEKCFG1],
		radio->registers[STATUS],
		(radio->registers[STATUS] & STATUS_STD) >> 14,
		(radio->registers[STATUS] & STATUS_SF) >> 13,
		(radio->registers[RSSI] & RSSI_RSSI));

	if ((radio->registers[STATUS] & STATUS_STD) == 0)
		dev_info(&radio->videodev->dev, "tune does not complete\n");
	if (timed_out)
		dev_info(&radio->videodev->dev, "tune timed out after %u ms\n",
			tune_timeout);

stop:
	/* stop tuning */
	current_chan = radio->registers[CHANNEL] & CHANNEL_CSR0_CH;

	radio->registers[CHANNEL] &= ~CHANNEL_CSR0_TUNE;
	retval = rtc6213n_set_register(radio, CHANNEL);
	if (retval < 0) {
		radio->registers[CHANNEL] = current_chan;
		goto done;
	}

	retval = rtc6213n_get_register(radio, STATUS);
	if (retval < 0)
		goto done;

done:
	dev_info(&radio->videodev->dev, "rtc6213n_set_chans is done\n");
	dev_info(&radio->videodev->dev, "CHANNEL=0x%4.4hx SEEKCFG1=0x%4.4hx STATUS=0x%4.4hx\n",
		radio->registers[CHANNEL], radio->registers[SEEKCFG1],
		radio->registers[STATUS]);
	dev_info(&radio->videodev->dev, "========= rtc6213n_set_chan End ==========\n");

	return retval;
}
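
The snippets collected here all share one idiom: re-arm a struct completion with INIT_COMPLETION() before kicking off a hardware operation, signal it from the interrupt handler with complete(), and block the caller with a bounded wait. A minimal, self-contained sketch of that pattern, assuming hypothetical demo_* names (note that mainline later replaced INIT_COMPLETION() with reinit_completion()):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct completion op_done;	/* init_completion() once at probe */
};

/* interrupt-handler side: signal that the operation finished */
static void demo_signal_done(struct demo_dev *d)
{
	complete(&d->op_done);
}

/* caller side: re-arm, start the hardware, then wait with a timeout */
static int demo_wait_op(struct demo_dev *d, unsigned int timeout_ms)
{
	unsigned long left;

	INIT_COMPLETION(d->op_done);	/* reinit_completion() on newer kernels */
	/* ... program device registers to start the operation here ... */

	left = wait_for_completion_timeout(&d->op_done,
					   msecs_to_jiffies(timeout_ms));
	return left ? 0 : -ETIMEDOUT;
}

Re-arming before starting the hardware matters: if the interrupt fires between the register write and the wait, the completion still records it.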
Example #2
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int status, ret = 0;

	if (_IOC_TYPE(cmd) != CHARM_CODE) {
		pr_err("%s: invalid ioctl code\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
	switch (cmd) {
	case WAKE_CHARM:
		pr_info("%s: Powering on mdm\n", __func__);
		mdm_drv->mdm_ready = 0;
		mdm_drv->mdm_hsic_reconnectd = 0;
		mdm_drv->ops->power_on_mdm_cb(mdm_drv);
		break;
	case CHECK_FOR_BOOT:
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case NORMAL_BOOT_DONE:
		{
			int ret_mdm_hsic_reconnectd = 0;
			pr_debug("%s: check if mdm is booted up\n", __func__);
			get_user(status, (unsigned long __user *) arg);
			if (status) {
				pr_debug("%s: normal boot failed\n", __func__);
				mdm_drv->mdm_boot_status = -EIO;
			} else {
				pr_info("%s: normal boot done\n", __func__);
				mdm_drv->mdm_boot_status = 0;
			}

			mdm_status_change_notified = false;
			queue_work_on(0, mdm_gpio_monitor_queue,
				      &mdm_status_check_work);

			mdm_drv->mdm_ready = 1;

			if (mdm_drv->ops->normal_boot_done_cb != NULL)
				mdm_drv->ops->normal_boot_done_cb(mdm_drv);

			ret_mdm_hsic_reconnectd = mdm_hsic_reconnectd_check_fn();

			if (ret_mdm_hsic_reconnectd == 1) {
				pr_info("%s: ret_mdm_hsic_reconnectd == 1\n", __func__);
			} else {
				pr_info("%s: ret_mdm_hsic_reconnectd == 0\n", __func__);
			}

			if (!first_boot)
				complete(&mdm_boot);
			else
				first_boot = 0;
		}
		break;
	case RAM_DUMP_DONE:
		pr_debug("%s: mdm done collecting RAM dumps\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status)
			mdm_drv->mdm_ram_dump_status = -EIO;
		else {
			pr_info("%s: ramdump collection completed\n", __func__);
			mdm_drv->mdm_ram_dump_status = 0;
		}
		complete(&mdm_ram_dumps);
		break;
	case WAIT_FOR_RESTART:
		pr_debug("%s: wait for mdm to need images reloaded\n",
				__func__);

		if (mdm_drv) {
	        dump_gpio("MDM2AP_STATUS", mdm_drv->mdm2ap_status_gpio);
	        dump_gpio("MDM2AP_ERRFATAL", mdm_drv->mdm2ap_errfatal_gpio);
		}

		ret = wait_for_completion_interruptible(&mdm_needs_reload);
		if (!ret && mdm_drv) {
			put_user(mdm_drv->boot_type,
					 (unsigned long __user *) arg);

			pr_err("%s: mdm_drv->boot_type:%d\n", __func__, mdm_drv->boot_type);
		}
		INIT_COMPLETION(mdm_needs_reload);
		break;
	case GET_MFG_MODE:
		pr_info("%s: board_mfg_mode()=%d\n", __func__, board_mfg_mode());
		put_user(board_mfg_mode(),
				 (unsigned long __user *) arg);
		break;
	case SET_MODEM_ERRMSG:
		pr_info("%s: Set modem fatal errmsg\n", __func__);
		ret = set_mdm_errmsg((void __user *) arg);
		break;
	case GET_RADIO_FLAG:
		pr_info("%s:get_radio_flag()=%x\n", __func__, get_radio_flag());

		
		if ((get_radio_flag() & RADIO_FLAG_USB_UPLOAD) && mdm_drv != NULL) {
			pr_info("AP2MDM_STATUS GPIO:%d\n", mdm_drv->ap2mdm_status_gpio);
			pr_info("AP2MDM_ERRFATAL GPIO:%d\n", mdm_drv->ap2mdm_errfatal_gpio);
			pr_info("AP2MDM_PMIC_RESET_N GPIO:%d\n", mdm_drv->ap2mdm_pmic_reset_n_gpio);
			pr_info("MDM2AP_STATUS GPIO:%d\n", mdm_drv->mdm2ap_status_gpio);
			pr_info("MDM2AP_ERRFATAL GPIO:%d\n", mdm_drv->mdm2ap_errfatal_gpio);
			pr_info("MDM2AP_HSIC_READY GPIO:%d\n", mdm_drv->mdm2ap_hsic_ready_gpio);
			pr_info("AP2MDM_IPC1 GPIO:%d\n", mdm_drv->ap2mdm_ipc1_gpio);
		}

		put_user(get_radio_flag(),
				 (unsigned long __user *) arg);
		break;
	case EFS_SYNC_DONE:
		pr_info("%s: efs sync is done\n", __func__);
		break;
	case NV_WRITE_DONE:
		pr_info("%s: NV write done!\n", __func__);
		notify_mdm_nv_write_done();
		break;
	case HTC_POWER_OFF_CHARM:
		pr_info("%s: (HTC_POWER_OFF_CHARM)Powering off mdm\n", __func__);
		if (mdm_drv->ops->htc_power_down_mdm_cb) {
			mdm_drv->mdm_ready = 0;
			mdm_drv->mdm_hsic_reconnectd = 0;
			mdm_drv->ops->htc_power_down_mdm_cb(mdm_drv);
		}
		break;
	case HTC_UPDATE_CRC_RESTART_LEVEL:
		pr_info("%s: (HTC_UPDATE_CRC_RESTART_LEVEL)\n", __func__);
		subsystem_update_restart_level_for_crc();
		break;
	default:
		pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
		ret = -EINVAL;
		break;
	}

	return ret;
}
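
mdm_modem_ioctl() is driven from a userspace modem manager. A hedged sketch of how such a daemon might exercise these requests; the /dev/mdm node path and the "mdm_ioctls.h" header exporting the CHARM_CODE request macros are assumptions, not taken from this listing:

/* Hypothetical userspace caller; "mdm_ioctls.h" stands in for whatever
 * UAPI header defines WAKE_CHARM and CHECK_FOR_BOOT on a given tree. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "mdm_ioctls.h"

int main(void)
{
	unsigned long booted = 0;
	int fd = open("/dev/mdm", O_RDWR);	/* device node is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, WAKE_CHARM) < 0)		/* power the modem on */
		perror("WAKE_CHARM");
	/* the driver stores 1 here while MDM2AP_STATUS is still low */
	if (ioctl(fd, CHECK_FOR_BOOT, &booted) == 0)
		printf("mdm booted: %s\n", booted ? "no" : "yes");
	close(fd);
	return 0;
}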
Example #3
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING) {
		int i;

		for (i = 0; i < 20; i++) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if (!(intr & ONENAND_INT_RESET)) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			/*
			 * Block sleep while OneNAND is writing, to prevent PM
			 * from putting OneNAND's power regulator to sleep.
			 */
			omap2_block_sleep();
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			omap2_allow_sleep();
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
static int __devinit omap34xx_bridge_probe(struct platform_device *pdev)
{
	int status;
	u32 initStatus;
	u32 temp;
	dev_t dev = 0;
	int result;
	struct dspbridge_platform_data *pdata = pdev->dev.platform_data;

	omap_dspbridge_dev = pdev;

	/* use 2.6 device model */
	result = alloc_chrdev_region(&dev, 0, 1, driver_name);
	if (result < 0) {
		pr_err("%s: Can't get major %d\n", __func__, driver_major);
		goto err1;
	}

	driver_major = MAJOR(dev);

	cdev_init(&bridge_cdev, &bridge_fops);
	bridge_cdev.owner = THIS_MODULE;

	status = cdev_add(&bridge_cdev, dev, 1);
	if (status) {
		pr_err("%s: Failed to add bridge device\n", __func__);
		result = status;
		goto err2;
	}

	/* udev support */
	bridge_class = class_create(THIS_MODULE, "ti_bridge");

	if (IS_ERR(bridge_class))
		pr_err("%s: Error creating bridge class\n", __func__);

	device_create(bridge_class, NULL, MKDEV(driver_major, 0),
			NULL, "DspBridge");

	bridge_create_sysfs();

	GT_init();
	GT_create(&driverTrace, "LD");

#ifdef CONFIG_BRIDGE_DEBUG
	if (GT_str)
		GT_set(GT_str);
#elif defined(DDSP_DEBUG_PRODUCT) && GT_TRACE
	GT_set("**=67");
#endif

#ifdef CONFIG_PM
	/* Initialize the wait queue */
	bridge_suspend_data.suspended = 0;
	init_waitqueue_head(&bridge_suspend_data.suspend_wq);
#endif

	SERVICES_Init();

	/*  Autostart flag.  This should be set to true if the DSP image should
	 *  be loaded and run during bridge module initialization  */

	if (base_img) {
		temp = true;
		REG_SetValue(AUTOSTART, (u8 *)&temp, sizeof(temp));
		REG_SetValue(DEFEXEC, (u8 *)base_img, strlen(base_img) + 1);
	} else {
		temp = false;
		REG_SetValue(AUTOSTART, (u8 *)&temp, sizeof(temp));
		REG_SetValue(DEFEXEC, (u8 *) "\0", (u32)2);
	}

	if (shm_size >= 0x10000) {	/* 64 KB */
		initStatus = REG_SetValue(SHMSIZE, (u8 *)&shm_size,
				sizeof(shm_size));
	} else {
		initStatus = DSP_EINVALIDARG;
		status = -1;
		pr_err("%s: SHM size must be at least 64 KB\n", __func__);
	}
	GT_1trace(driverTrace, GT_7CLASS,
		 "requested shm_size = 0x%x\n", shm_size);

	if (pdata->phys_mempool_base && pdata->phys_mempool_size) {
		phys_mempool_base = pdata->phys_mempool_base;
		phys_mempool_size = pdata->phys_mempool_size;
	}

	GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_base = 0x%x\n",
		 phys_mempool_base);

	GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_size = 0x%x\n",
		 phys_mempool_size);

	if ((phys_mempool_base > 0x0) && (phys_mempool_size > 0x0))
		MEM_ExtPhysPoolInit(phys_mempool_base, phys_mempool_size);
	if (tc_wordswapon) {
		GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is enabled\n");
		REG_SetValue(TCWORDSWAP, (u8 *)&tc_wordswapon,
				sizeof(tc_wordswapon));
	} else {
		GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is disabled\n");
		REG_SetValue(TCWORDSWAP, (u8 *)&tc_wordswapon,
				sizeof(tc_wordswapon));
	}
	if (DSP_SUCCEEDED(initStatus)) {
#ifdef CONFIG_BRIDGE_DVFS
		clk_handle = clk_get(NULL, "iva2_ck");
		if (!clk_handle)
			pr_err("%s: clk_get failed to get iva2_ck\n", __func__);

		if (clk_notifier_register(clk_handle, &iva_clk_notifier))
			pr_err("%s: clk_notifier_register failed for iva2_ck\n",
								__func__);

		if (!min_dsp_freq)
			min_dsp_freq = pdata->mpu_min_speed;
#endif
		driverContext = DSP_Init(&initStatus);
		if (DSP_FAILED(initStatus)) {
			status = -1;
			pr_err("DSP Bridge driver initialization failed\n");
		} else {
			pr_info("DSP Bridge driver loaded\n");
		}
	}
#ifdef CONFIG_BRIDGE_RECOVERY
	bridge_rec_queue = create_workqueue("bridge_rec_queue");
	INIT_WORK(&bridge_recovery_work, bridge_recover);
	INIT_COMPLETION(bridge_comp);
#endif

	DBC_Assert(status == 0);
	DBC_Assert(DSP_SUCCEEDED(initStatus));

	return 0;

err2:
	unregister_chrdev_region(dev, 1);
err1:
	return result;
}
Example #5
static void mdp4_dsi_cmd_do_blt(struct msm_fb_data_type *mfd, int enable)
{
	unsigned long flags;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int need_wait = 0;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);

	if (mfd->ov0_wb_buf->write_addr == 0) {
		pr_err("%s: no blt_base assigned\n", __func__);
		return;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (enable && pipe->ov_blt_addr == 0) {
		vctrl->blt_change++;
		if (vctrl->dmap_koff != vctrl->dmap_done) {
			INIT_COMPLETION(vctrl->dmap_comp);
			need_wait = 1;
		}
	} else if (enable == 0 && pipe->ov_blt_addr) {
		vctrl->blt_change++;
		if (vctrl->ov_koff != vctrl->dmap_done) {
			INIT_COMPLETION(vctrl->dmap_comp);
			need_wait = 1;
		}
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (need_wait)
		mdp4_dsi_cmd_wait4dmap(0);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (enable && pipe->ov_blt_addr == 0) {
		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
		pipe->ov_cnt = 0;
		pipe->dmap_cnt = 0;
		vctrl->ov_koff = vctrl->dmap_koff;
		vctrl->ov_done = vctrl->dmap_done;
		vctrl->blt_free = 0;
		vctrl->blt_wait = 0;
		vctrl->blt_end = 0;
		mdp4_stat.blt_dsi_video++;
	} else if (enable == 0 && pipe->ov_blt_addr) {
		pipe->ov_blt_addr = 0;
		pipe->dma_blt_addr = 0;
		vctrl->blt_end = 1;
		vctrl->blt_free = 4;	/* 4 commits to free wb buf */
	}

	pr_debug("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
		vctrl->blt_change, enable, (int)pipe->ov_blt_addr);

	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
}
Example #6
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int status, ret = 0;

	if (_IOC_TYPE(cmd) != CHARM_CODE) {
		pr_err("%s: invalid ioctl code\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
	switch (cmd) {
	case WAKE_CHARM:
		pr_info("%s: Powering on mdm\n", __func__);
		mdm_drv->ops->power_on_mdm_cb(mdm_drv);
		break;
	case CHECK_FOR_BOOT:
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case NORMAL_BOOT_DONE:
		pr_info("%s: check if mdm is booted up\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status) {
			pr_debug("%s: normal boot failed\n", __func__);
			mdm_drv->mdm_boot_status = -EIO;
		} else {
			pr_info("%s: normal boot done\n", __func__);
			mdm_drv->mdm_boot_status = 0;
		}
		mdm_drv->mdm_ready = 1;

		if (mdm_drv->ops->normal_boot_done_cb != NULL)
			mdm_drv->ops->normal_boot_done_cb(mdm_drv);

		if (first_boot)
			first_boot = 0;

		complete(&mdm_boot);
		break;
	case RAM_DUMP_DONE:
		pr_info("%s: mdm done collecting RAM dumps\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status)
			mdm_drv->mdm_ram_dump_status = -EIO;
		else {
			pr_info("%s: ramdump collection completed\n", __func__);
			mdm_drv->mdm_ram_dump_status = 0;
			msleep(1000);
			panic("CP Crash %s", mdm_read_err_report());
		}
		complete(&mdm_ram_dumps);
		break;
	case WAIT_FOR_RESTART:
		pr_info("%s: wait for mdm to need images reloaded\n",
				__func__);
		ret = wait_for_completion_interruptible(&mdm_needs_reload);
		if (!ret)
			put_user(mdm_drv->boot_type,
					 (unsigned long __user *) arg);
		INIT_COMPLETION(mdm_needs_reload);
		break;

	case SILENT_RESET_CONTROL:
		pr_info("%s: mdm doing silent reset\n", __func__);
		mdm_drv->mdm_ram_dump_status = 0;
		complete(&mdm_ram_dumps);
		break;

	default:
		pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
		ret = -EINVAL;
		break;
	}

	return ret;
}
Example #7
/**
 * omap3_bridge_startup() - perform low level initializations
 * @pdev:      pointer to platform device
 *
 * Initializes recovery, PM and DVFS required data, before calling
 * clk and memory init routines.
 */
static int omap3_bridge_startup(struct platform_device *pdev)
{
	struct omap_dsp_platform_data *pdata = pdev->dev.platform_data;
	struct drv_data *drv_datap = NULL;
	u32 phys_membase, phys_memsize;
	int err;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	int i;
#endif

#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
	bridge_rec_queue = create_workqueue("bridge_rec_queue");
	INIT_WORK(&bridge_recovery_work, bridge_recover);
	INIT_COMPLETION(bridge_comp);
#endif

#ifdef CONFIG_PM
	/* Initialize the wait queue */
	bridge_suspend_data.suspended = 0;
	init_waitqueue_head(&bridge_suspend_data.suspend_wq);

#ifdef CONFIG_TIDSPBRIDGE_DVFS
	for (i = 0; i < 6; i++)
		pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate;

	err = cpufreq_register_notifier(&iva_clk_notifier,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (err)
		pr_err("%s: clk_notifier_register failed for iva2_ck\n",
								__func__);
#endif
#endif

	dsp_clk_init();

	drv_datap = kzalloc(sizeof(struct drv_data), GFP_KERNEL);
	if (!drv_datap) {
		err = -ENOMEM;
		goto err1;
	}

	drv_datap->shm_size = shm_size;
	drv_datap->tc_wordswapon = tc_wordswapon;

	if (base_img) {
		drv_datap->base_img = kmalloc(strlen(base_img) + 1, GFP_KERNEL);
		if (!drv_datap->base_img) {
			err = -ENOMEM;
			goto err2;
		}
		strncpy(drv_datap->base_img, base_img, strlen(base_img) + 1);
	}

	dev_set_drvdata(bridge, drv_datap);

	if (shm_size < 0x10000) {	/* 64 KB */
		err = -EINVAL;
		pr_err("%s: shm size must be at least 64 KB\n", __func__);
		goto err3;
	}
	dev_dbg(bridge, "%s: requested shm_size = 0x%x\n", __func__, shm_size);

	phys_membase = pdata->phys_mempool_base;
	phys_memsize = pdata->phys_mempool_size;
	if (phys_membase > 0 && phys_memsize > 0)
		mem_ext_phys_pool_init(phys_membase, phys_memsize);

	if (tc_wordswapon)
		dev_dbg(bridge, "%s: TC Word Swap is enabled\n", __func__);

	driver_context = dsp_init(&err);
	if (err) {
		pr_err("DSP Bridge driver initialization failed\n");
		goto err4;
	}

	return 0;

err4:
	mem_ext_phys_pool_release();
err3:
	kfree(drv_datap->base_img);
err2:
	kfree(drv_datap);
err1:
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	cpufreq_unregister_notifier(&iva_clk_notifier,
					CPUFREQ_TRANSITION_NOTIFIER);
#endif
	dsp_clk_exit();

	return err;
}
Example #8
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	bool has_unre_flag = dev->pdata->has_unre_flag;

	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	INIT_COMPLETION(dev->cmd_complete);
	dev->transfer_status = 0;

	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
			dev_err(dev->dev, "RXRDY still set!");
			at91_twi_read(dev, AT91_TWI_RHR);
		}

		/* if only one byte is to be read, immediately stop transfer */
		if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma, the last byte has to be read manually in
		 * order to not send the stop command too late and then
		 * to receive extra data. In practice, there are some issues
		 * if you use the dma to read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_read_data_dma(dev);
			/*
			 * It is important to enable TXCOMP irq here because
			 * doing it only when transferring the last two bytes
			 * will mask NACK errors since TXCOMP is set when a
			 * NACK occurs.
			 */
			at91_twi_write(dev, AT91_TWI_IER,
			       AT91_TWI_TXCOMP);
		} else
			at91_twi_write(dev, AT91_TWI_IER,
			       AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write_data_dma(dev);
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
		}
	}

	ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
							dev->adapter.timeout);
	if (ret == 0) {
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	at91_twi_dma_cleanup(dev);
	return ret;
}
Example #9
static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
					struct dsi_buf *tp)
{
	int len, ret = 0;
	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
	char *bp;
	unsigned long size, addr;
	struct mdss_dsi_ctrl_pdata *mctrl = NULL;

	bp = tp->data;

	len = ALIGN(tp->len, 4);
	size = ALIGN(tp->len, SZ_4K);

	if (is_mdss_iommu_attached()) {
		ret = msm_iommu_map_contig_buffer(tp->dmap,
					mdss_get_iommu_domain(domain), 0,
					size, SZ_4K, 0, &(addr));
		if (IS_ERR_VALUE(ret)) {
			pr_err("unable to map dma memory to iommu(%d)\n", ret);
			return -ENOMEM;
		}
	} else {
		addr = tp->dmap;
	}

	INIT_COMPLETION(ctrl->dma_comp);

	/* Ensure that for slave controller, master is also configured */
	if (mdss_dsi_is_slave_ctrl(ctrl)) {
		mctrl = mdss_dsi_get_master_ctrl();
		if (mctrl) {
			MIPI_OUTP(mctrl->ctrl_base + 0x048, addr);
			MIPI_OUTP(mctrl->ctrl_base + 0x04c, len);
		} else {
			pr_warn("%s: Unable to get master control\n",
				__func__);
		}
	}

	MIPI_OUTP((ctrl->ctrl_base) + 0x048, addr);
	MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
	wmb();

	/* Trigger on master controller as well */
	if (mctrl)
		MIPI_OUTP(mctrl->ctrl_base + 0x090, 0x01);

	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);
	wmb();

	ret = wait_for_completion_timeout(&ctrl->dma_comp,
				msecs_to_jiffies(DMA_TX_TIMEOUT));
	if (ret == 0)
		ret = -ETIMEDOUT;
	else
		ret = tp->len;

	if (is_mdss_iommu_attached())
		msm_iommu_unmap_contig_buffer(addr,
			mdss_get_iommu_domain(domain), 0, size);

	return ret;
}
Example #10
static int start_ipc(struct link_device *ld, struct io_device *iod)
{
	struct sk_buff *skb;
	char data[1] = {'a'};
	int err;
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct link_pm_data *pm_data = usb_ld->link_pm_data;
	struct device *dev = &usb_ld->usbdev->dev;
	struct if_usb_devdata *pipe_data = &usb_ld->devdata[IF_USB_FMT_EP];

	if (!usb_ld->if_usb_connected) {
		mif_err("HSIC not connected, skip start ipc\n");
		err = -ENODEV;
		goto exit;
	}

retry:
	if (ld->mc->phone_state != STATE_ONLINE) {
		mif_err("MODEM is not online, skip start ipc\n");
		err = -ENODEV;
		goto exit;
	}

	/* check usb runtime pm first */
	if (dev->power.runtime_status != RPM_ACTIVE) {
		if (!pm_data->resume_requested) {
			mif_debug("QW PM\n");
			INIT_COMPLETION(pm_data->active_done);
			queue_delayed_work(pm_data->wq,
					&pm_data->link_pm_work, 0);
		}
		mif_debug("Wait pm\n");
		err = wait_for_completion_timeout(&pm_data->active_done,
							msecs_to_jiffies(500));
		/* timeout or -ERESTARTSYS */
		if (err <= 0)
			goto retry;
	}

	pm_runtime_get_sync(dev);

	mif_err("send 'a'\n");

	skb = alloc_skb(16, GFP_ATOMIC);
	if (unlikely(!skb)) {
		pm_runtime_put(dev);
		return -ENOMEM;
	}
	memcpy(skb_put(skb, 1), data, 1);
	skbpriv(skb)->iod = iod;
	skbpriv(skb)->ld = ld;

	if (!usb_ld->if_usb_connected || !usb_ld->usbdev) {
		dev_kfree_skb_any(skb);
		pm_runtime_put(dev);
		return -ENODEV;
	}

	usb_mark_last_busy(usb_ld->usbdev);
	err = usb_tx_urb_with_skb(usb_ld->usbdev, skb, pipe_data);
	if (err < 0) {
		mif_err("usb_tx_urb fail\n");
		dev_kfree_skb_any(skb);
	}

	pm_runtime_put(dev);
exit:
	return err;
}
Example #11
int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
			 struct msc_command_struct *req)
{
	int timeout;
	u8 start_bit = 0x00;
	u8 *burst_data;
	int i;
	struct i2c_client *client = mhl_ctrl->i2c_handle;

	if (mhl_ctrl->cur_state != POWER_STATE_D0_MHL) {
		pr_debug("%s: power_state:%02x CBUS(0x0A):%02x\n",
			 __func__,
			 mhl_ctrl->cur_state,
			 MHL_SII_REG_NAME_RD(REG_CBUS_BUS_STATUS));
		return -EFAULT;
	}

	if (!req)
		return -EFAULT;

	pr_debug("%s: command=0x%02x offset=0x%02x %02x %02x",
		 __func__,
		 req->command,
		 req->offset,
		 req->payload.data[0],
		 req->payload.data[1]);

	/* REG_CBUS_PRI_ADDR_CMD = REQ CBUS CMD or OFFSET */
	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->offset);
	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_1ST,
			    req->payload.data[0]);

	switch (req->command) {
	case MHL_SET_INT:
	case MHL_WRITE_STAT:
		start_bit = MSC_START_BIT_WRITE_REG;
		break;
	case MHL_READ_DEVCAP:
		start_bit = MSC_START_BIT_READ_REG;
		break;
	case MHL_GET_STATE:
	case MHL_GET_VENDOR_ID:
	case MHL_SET_HPD:
	case MHL_CLR_HPD:
	case MHL_GET_SC1_ERRORCODE:
	case MHL_GET_DDC_ERRORCODE:
	case MHL_GET_MSC_ERRORCODE:
	case MHL_GET_SC3_ERRORCODE:
		start_bit = MSC_START_BIT_MSC_CMD;
		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
		break;
	case MHL_MSC_MSG:
		start_bit = MSC_START_BIT_VS_CMD;
		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_2ND,
				    req->payload.data[1]);
		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
		break;
	case MHL_WRITE_BURST:
		start_bit = MSC_START_BIT_WRITE_BURST;
		MHL_SII_REG_NAME_WR(REG_MSC_WRITE_BURST_LEN, req->length - 1);
		if (!(req->payload.burst_data)) {
			pr_err("%s: burst data is null!\n", __func__);
			goto cbus_send_fail;
		}
		burst_data = req->payload.burst_data;
		for (i = 0; i < req->length; i++, burst_data++)
			MHL_SII_REG_NAME_WR(REG_CBUS_SCRATCHPAD_0 + i,
				*burst_data);
		break;
	default:
		pr_err("%s: unknown command! (%02x)\n",
		       __func__, req->command);
		goto cbus_send_fail;
	}

	INIT_COMPLETION(mhl_ctrl->msc_cmd_done);
	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_START, start_bit);
	timeout = wait_for_completion_interruptible_timeout
		(&mhl_ctrl->msc_cmd_done, msecs_to_jiffies(T_ABORT_NEXT));
	if (!timeout) {
		pr_err("%s: cbus_command_send timed out!\n", __func__);
		goto cbus_send_fail;
	}

	switch (req->command) {
	case MHL_READ_DEVCAP:
		req->retval = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_RD_DATA_1ST);
		break;
	case MHL_MSC_MSG:
		/* check if MSC_MSG NACKed */
		if (MHL_SII_REG_NAME_RD(REG_MSC_WRITE_BURST_LEN) & BIT6)
			return -EAGAIN;
		/* fall through */
	default:
		req->retval = 0;
		break;
	}
	mhl_msc_command_done(mhl_ctrl, req);
	pr_debug("%s: msc cmd done\n", __func__);
	return 0;

cbus_send_fail:
	return -EFAULT;
}
Example #12
static void mdp4_dsi_video_do_blt(struct msm_fb_data_type *mfd, int enable)
{
	unsigned long flag;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);

	if (mfd->ov0_wb_buf->write_addr == 0) {
		pr_info("%s: no blt_base assigned\n", __func__);
		return;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flag);
	if (enable && pipe->ov_blt_addr == 0) {
		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
		pipe->ov_cnt = 0;
		pipe->dmap_cnt = 0;
		vctrl->ov_koff = 0;
		vctrl->ov_done = 0;
		vctrl->blt_free = 0;
		mdp4_stat.blt_dsi_video++;
		vctrl->blt_change++;
	} else if (enable == 0 && pipe->ov_blt_addr) {
		pipe->ov_blt_addr = 0;
		pipe->dma_blt_addr = 0;
		vctrl->blt_free = 4;	/* 4 commits to free wb buf */
		vctrl->blt_change++;
	}

	pr_info("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
		vctrl->blt_change, enable, (int)pipe->ov_blt_addr);

	if (!vctrl->blt_change) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
		return;
	}

	spin_unlock_irqrestore(&vctrl->spin_lock, flag);

	if (mdp_ov0_blt_ctl == MDP4_BLT_SWITCH_TG_OFF) {
		int tg_enabled;
		pr_debug("%s: blt enabled by switching TG off\n", __func__);
		tg_enabled = inpdw(MDP_BASE + DSI_VIDEO_BASE) & 0x01;
		if (tg_enabled) {
			mdp4_dsi_video_wait4dmap_done(0);
			MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
			msleep(20);
		}
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		if (tg_enabled) {
			if (pipe->ov_blt_addr) {
				spin_lock_irqsave(&vctrl->spin_lock, flag);
				pipe->ov_cnt++;
				vctrl->ov_koff++;
				/* kickoff overlay engine */
				mdp4_stat.kickoff_ov0++;
				INIT_COMPLETION(vctrl->ov_comp);
				vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
				outpdw(MDP_BASE + 0x0004, 0);
				spin_unlock_irqrestore(&vctrl->spin_lock, flag);
				mdp4_dsi_video_wait4ov(0);
			}
			mipi_dsi_sw_reset();
			MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
		}
		vctrl->blt_change = 0;
	}
}
Example #13
int mdp4_dsi_video_pipe_commit(int cndx, int wait)
{

	int  i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return cnt;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
				vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_dsi_video_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(0);
	}

	pipe = vp->plist;

	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/*
			 * free previous iommu to freelist
			 * which will be freed at next pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dsi_video_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(cndx);
		else
			mdp4_dsi_video_wait4dmap(cndx);
	}

	return cnt;
}
Example #14
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
Example #15
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_INFO("MDP-PPP: %d\n",
				    (int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_INFO("MDP-DMA2: %d\n",
				    (int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		mdp_lut_enable();

#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */
#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
Example #16
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int status, ret = 0;

	if (_IOC_TYPE(cmd) != CHARM_CODE) {
		pr_err("%s: invalid ioctl code\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
	switch (cmd) {
	case WAKE_CHARM:
		pr_info("%s: Powering on mdm\n", __func__);
		mdm_drv->ops->power_on_mdm_cb(mdm_drv);
		break;
	case CHECK_FOR_BOOT:
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case NORMAL_BOOT_DONE:
		pr_debug("%s: check if mdm is booted up\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status) {
			pr_debug("%s: normal boot failed\n", __func__);
			mdm_drv->mdm_boot_status = -EIO;
		} else {
			pr_info("%s: normal boot done\n", __func__);
			mdm_drv->mdm_boot_status = 0;
		}
		mdm_drv->mdm_ready = 1;

		if (mdm_drv->ops->normal_boot_done_cb != NULL)
			mdm_drv->ops->normal_boot_done_cb(mdm_drv);

		if (!first_boot)
			complete(&mdm_boot);
		else
			first_boot = 0;

		/* If bootup succeeded, start a timer to check that the
		 * mdm2ap_status gpio goes high.
		 */
		if (!status && gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			schedule_delayed_work(&mdm2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case RAM_DUMP_DONE:
		pr_debug("%s: mdm done collecting RAM dumps\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status)
			mdm_drv->mdm_ram_dump_status = -EIO;
		else {
			pr_info("%s: ramdump collection completed\n", __func__);
			mdm_drv->mdm_ram_dump_status = 0;
		}
		complete(&mdm_ram_dumps);
		break;
	case WAIT_FOR_RESTART:
		pr_debug("%s: wait for mdm to need images reloaded\n",
				__func__);
		ret = wait_for_completion_interruptible(&mdm_needs_reload);
		if (!ret)
			put_user(mdm_drv->boot_type,
					 (unsigned long __user *) arg);
		INIT_COMPLETION(mdm_needs_reload);
		break;
	case GET_DLOAD_STATUS:
		pr_debug("getting status of mdm2ap_errfatal_gpio\n");
		if (gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio) == 1 &&
			!mdm_drv->mdm_ready)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	default:
		pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
		ret = -EINVAL;
		break;
	}

	return ret;
}
Example #17
int mdp4_lcdc_pipe_commit(int cndx, int wait)
{

	int  i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;     /* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
		else
			mdp4_lcdc_wait4dmap(0);
	}

	return cnt;
}
Example #18
ssize_t mdp4_dsi_video_show_event(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int cndx;
	struct vsycn_ctrl *vctrl;
	ssize_t ret = 0;
	unsigned long flags;
	u64 vsync_tick;
//yanghai add the iommu patch 2013.5.25	
#ifdef CONFIG_VENDOR_EDIT
	ktime_t ctime;
	u32 ctick, ptick;
	int diff;
#endif
//yanghai add
	cndx = 0;
	vctrl = &vsync_ctrl_db[0];

	if (atomic_read(&vctrl->suspend) > 0 ||
		atomic_read(&vctrl->vsync_resume) == 0)
		return 0;
//yanghai add the iommu patch 2013.5.25	
#ifdef CONFIG_VENDOR_EDIT
	/*
	 * The show_event thread keeps spinning on vctrl->vsync_comp.
	 * There is a race condition on x.done if multiple threads are
	 * blocked at wait_for_completion(&vctrl->vsync_comp):
	 *
	 * if the show_event thread wakes up first, it comes back and
	 * calls INIT_COMPLETION(vctrl->vsync_comp), which sets x.done = 0;
	 * the second thread then wakes up and sets x.done = 0x7ffffffd,
	 * after which wait_for_completion will never wait.
	 * To avoid this, force the show_event thread to sleep 5 ms here,
	 * since it has a full vsync period (16.6 ms) to wait.
	 */
	ctime = ktime_get();
	ctick = (u32)ktime_to_us(ctime);
	ptick = (u32)ktime_to_us(vctrl->vsync_time);
	ptick += 5000;	/* 5 ms */
	diff = ptick - ctick;
	if (diff > 0) {
		if (diff > 1000)	/* 1 ms */
			diff = 1000;
		usleep(diff);
	}
#endif
//yanghai add end
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
		msecs_to_jiffies(VSYNC_PERIOD * 4));
	if (ret <= 0) {
		vctrl->wait_vsync_cnt = 0;
		vctrl->vsync_time = ktime_get();
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vsync_tick = ktime_to_ns(vctrl->vsync_time);
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
	buf[strlen(buf) + 1] = '\0';
	return ret;
}
Example #19
void bridge_recover_schedule(void)
{
	INIT_COMPLETION(bridge_open_comp);
	recover = true;
	queue_work(bridge_rec_queue, &bridge_recovery_work);
}
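
bridge_recover_schedule() re-arms bridge_open_comp before queueing the recovery work, so concurrent open() callers can be parked until recovery finishes. A sketch of that consumer side, assuming the recovery worker ends by calling complete_all(&bridge_open_comp):

/* sketch: gate an open() path on the recovery completion */
static int bridge_open_gate(void)
{
	if (recover &&
	    wait_for_completion_interruptible(&bridge_open_comp))
		return -ERESTARTSYS;
	return 0;
}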
int rpmsg_send_command(struct rpmsg_instance *instance, u32 cmd,
						u32 sub, u8 *in,
						u32 *out, u32 inlen,
						u32 outlen)
{
	int ret = 0;

	if (!instance) {
		pr_err("%s: Instance is NULL\n", __func__);
		return -EFAULT;
	}

	/* Hold global rpmsg lock */
	rpmsg_lock();

	mutex_lock(&instance->instance_lock);

	/* Prepare Tx buffer */
	instance->tx_msg->cmd = cmd;
	instance->tx_msg->sub = sub;
	instance->tx_msg->in = in;
	instance->tx_msg->out = out;
	instance->tx_msg->inlen = inlen;
	instance->tx_msg->outlen = outlen;

	/* Prepare Rx buffer */
	mutex_lock(&instance->rx_lock);
	instance->rx_msg->status = -1;
	mutex_unlock(&instance->rx_lock);
	INIT_COMPLETION(instance->reply_arrived);

	/* Send message to remote processor (SCU) using rpdev channel */
	ret = rpmsg_send_offchannel(
					instance->rpdev,
					instance->endpoint->addr,
					instance->rpdev->dst,
					instance->tx_msg,
					sizeof(*instance->tx_msg)
					);
	if (ret) {
		dev_err(&instance->rpdev->dev, "%s failed: %d\n",
						 __func__, ret);
		goto end;
	}

	if (0 == wait_for_completion_timeout(&instance->reply_arrived,
						RPMSG_TX_TIMEOUT)) {
		dev_err(&instance->rpdev->dev,
				"timeout: %d\n", ret);
		ret = -ETIMEDOUT;
		goto end;
	}

	mutex_lock(&instance->rx_lock);
	ret = instance->rx_msg->status;
	mutex_unlock(&instance->rx_lock);
end:
	mutex_unlock(&instance->instance_lock);
	rpmsg_unlock();

	return ret;
}
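
For reference, a hedged caller of rpmsg_send_command(); the demo_scu_query() wrapper and the cmd/sub values are illustrative placeholders, not real SCU protocol constants:

/* illustrative caller; 'inst' comes from normal rpmsg instance setup */
static int demo_scu_query(struct rpmsg_instance *inst)
{
	u8 in_data = 0x01;	/* placeholder payload */
	u32 out_data = 0;

	/* cmd = 0xE1 and sub = 0x00 are made-up values for illustration */
	return rpmsg_send_command(inst, 0xE1, 0x00, &in_data, &out_data,
				  sizeof(in_data), sizeof(out_data));
}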
/* For SSC SPI as MASTER, TX/RX is handled as follows:

   1. Fill the TX_FIFO with up to (SSC_TXFIFO_SIZE - 1) words, and enable
      TX_FIFO_EMPTY interrupts.
   2. When the last word of TX_FIFO is copied to the shift register,
      a TX_FIFO_EMPTY interrupt is issued, and the last word will *start* being
      shifted out/in.
   3. On receiving a TX_FIFO_EMPTY interrupt, copy all *available* received
      words from the RX_FIFO. Note, depending on the time taken to shift out/in
      the 'last' word compared to the IRQ latency, the 'last' word may not be
      available yet in the RX_FIFO.
   4. If there are more bytes to TX, refill the TX_FIFO.  Since the 'last' word
      from the previous iteration may still be (or about to be) in the RX_FIFO,
      only add up to (SSC_TXFIFO_SIZE - 1) words.  If all bytes have been
      transmitted, disable TX and set completion.
   5. If we are interested in the received data, check to see if the 'last' word
      has been received.  If not, then wait the period of shifting 1 word, then
      read the 'last' word from the RX_FIFO.

*/
static void spi_stmssc_fill_tx_fifo(struct spi_stm_ssc *st_ssc)
{
	union {
		u8 bytes[4];
		u32 dword;
	} tmp = {.dword = 0,};
	int i;

	for (i = 0;
	     i < SSC_TXFIFO_SIZE - 1 && st_ssc->tx_bytes_pending > 0; i++) {
		if (st_ssc->bits_per_word > 8) {
			if (st_ssc->tx_ptr) {
				tmp.bytes[1] = *st_ssc->tx_ptr++;
				tmp.bytes[0] = *st_ssc->tx_ptr++;
			} else {
				tmp.bytes[1] = 0;
				tmp.bytes[0] = 0;
			}

			st_ssc->tx_bytes_pending -= 2;

		} else {
			if (st_ssc->tx_ptr)
				tmp.bytes[0] = *st_ssc->tx_ptr++;
			else
				tmp.bytes[0] = 0;

			st_ssc->tx_bytes_pending--;
		}
		ssc_store32(st_ssc, SSC_TBUF, tmp.dword);
	}
}

static int spi_stmssc_rx_mopup(struct spi_stm_ssc *st_ssc)
{
	unsigned long word_period_ns;
	u32 rx_fifo_status;
	union {
		u8 bytes[4];
		u32 dword;
	} tmp = {.dword = 0,};

	dgb_print("\n");

	word_period_ns = 1000000000 / st_ssc->baud;
	word_period_ns *= st_ssc->bits_per_word;

	/* delay for period equivalent to shifting 1 complete word
	   out of and into shift register */
	ndelay(word_period_ns);

	/* Check 'last' word is actually there! */
	rx_fifo_status = ssc_load32(st_ssc, SSC_RX_FSTAT);
	if (rx_fifo_status == 1) {
		tmp.dword = ssc_load32(st_ssc, SSC_RBUF);

		if (st_ssc->bits_per_word > 8) {
			if (st_ssc->rx_ptr) {
				*st_ssc->rx_ptr++ = tmp.bytes[1];
				*st_ssc->rx_ptr++ = tmp.bytes[0];
			}
			st_ssc->rx_bytes_pending -= 2;
		} else {
			if (st_ssc->rx_ptr)
				*st_ssc->rx_ptr++ = tmp.bytes[0];
			st_ssc->rx_bytes_pending--;
		}
	} else {
		dgb_print("should only be one word in RX_FIFO"
			  "(rx_fifo_status = %d)\n", rx_fifo_status);
	}

	return 0;
}


static int spi_stmssc_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct spi_stm_ssc *st_ssc;

	dgb_print("\n");

	st_ssc = spi_master_get_devdata(spi->master);

	st_ssc->tx_ptr = t->tx_buf;
	st_ssc->rx_ptr = t->rx_buf;
	st_ssc->tx_bytes_pending = t->len;
	st_ssc->rx_bytes_pending = t->len;
	INIT_COMPLETION(st_ssc->done);

	/* fill TX_FIFO */
	spi_stmssc_fill_tx_fifo(st_ssc);

	/* enable TX_FIFO_EMPTY interrupts */
	ssc_store32(st_ssc, SSC_IEN, SSC_IEN_TIEN);

	/* wait for all bytes to be transmitted*/
	wait_for_completion(&st_ssc->done);

	/* check 'last' byte has been received */
	/* NOTE: need to read rxbuf, even if ignoring the result! */
	if (st_ssc->rx_bytes_pending)
		spi_stmssc_rx_mopup(st_ssc);

	/* disable ints */
	ssc_store32(st_ssc, SSC_IEN, 0x0);

	return t->len - st_ssc->tx_bytes_pending;
}



static irqreturn_t spi_stmssc_irq(int irq, void *dev_id)
{
	struct spi_stm_ssc *st_ssc = (struct spi_stm_ssc *)dev_id;
	unsigned int rx_fifo_status;
	u32 ssc_status;

	union {
		u8 bytes[4];
		u32 dword;
	} tmp = {.dword = 0,};

	ssc_status = ssc_load32(st_ssc, SSC_STA);

	/* FIFO_TX_EMPTY */
	if (ssc_status & SSC_STA_TIR) {
		/* Find number of words available in RX_FIFO: 8 if RX_FIFO_FULL,
		   else SSC_RX_FSTAT (0-7)
		*/
		rx_fifo_status = (ssc_status & SSC_STA_RIR) ? 8 :
			ssc_load32(st_ssc, SSC_RX_FSTAT);

		/* Read all available words from RX_FIFO */
		while (rx_fifo_status) {
			tmp.dword = ssc_load32(st_ssc, SSC_RBUF);

			if (st_ssc->bits_per_word > 8) {
				if (st_ssc->rx_ptr) {
					*st_ssc->rx_ptr++ = tmp.bytes[1];
					*st_ssc->rx_ptr++ = tmp.bytes[0];
				}
				st_ssc->rx_bytes_pending -= 2;
			} else {
				if (st_ssc->rx_ptr)
					*st_ssc->rx_ptr++ = tmp.bytes[0];
				st_ssc->rx_bytes_pending--;
			}

			rx_fifo_status = ssc_load32(st_ssc, SSC_RX_FSTAT);
		}

		/* See if there is more data to send */
		if (st_ssc->tx_bytes_pending > 0)
			spi_stmssc_fill_tx_fifo(st_ssc);
		else {
			/* No more data to send */
			ssc_store32(st_ssc, SSC_IEN, 0x0);
			complete(&st_ssc->done);
		}
	}

	return IRQ_HANDLED;
}


static int __init spi_stm_probe(struct platform_device *pdev)
{
	struct ssc_pio_t *pio_info =
			(struct ssc_pio_t *)pdev->dev.platform_data;
	struct spi_master *master;
	struct resource *res;
	struct spi_stm_ssc *st_ssc;

	u32 reg;

	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_stm_ssc));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	st_ssc = spi_master_get_devdata(master);
	st_ssc->bitbang.master     = spi_master_get(master);
	st_ssc->bitbang.setup_transfer = spi_stmssc_setup_transfer;
	st_ssc->bitbang.txrx_bufs  = spi_stmssc_txrx_bufs;
	st_ssc->bitbang.master->setup = spi_stmssc_setup;

	if (pio_info->chipselect)
		st_ssc->bitbang.chipselect = (void (*)
					      (struct spi_device *, int))
			pio_info->chipselect;
	else
		st_ssc->bitbang.chipselect = spi_stpio_chipselect;

	master->num_chipselect = SPI_NO_CHIPSELECT + 1;
	master->bus_num = pdev->id;
	init_completion(&st_ssc->done);

	/* Get resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), "spi")) {
		printk(KERN_ERR NAME " Request mem 0x%x region failed\n",
		       res->start);
		return -ENOMEM;
	}

	st_ssc->base =
		(unsigned long) devm_ioremap_nocache(&pdev->dev, res->start,
						     resource_size(res));
	if (!st_ssc->base) {
		printk(KERN_ERR NAME " Request iomem 0x%x region failed\n",
		       res->start);
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		printk(KERN_ERR NAME " Failed to get IRQ resource\n");
		return -ENODEV;
	}

	if (devm_request_irq(&pdev->dev, res->start, spi_stmssc_irq,
		IRQF_DISABLED, "stspi", st_ssc) < 0) {
		printk(KERN_ERR NAME " Request irq failed\n");
		return -ENODEV;
	}

	/* Check for hard wired SSC which doesn't use PIO pins */
	if (pio_info->pio[0].pio_port == SSC_NO_PIO)
		goto ssc_hard_wired;

	/* Get PIO pins */
	pio_info->clk = stpio_request_set_pin(pio_info->pio[0].pio_port,
					  pio_info->pio[0].pio_pin,
					      "SPI Clock", STPIO_BIDIR, 0);
	if (!pio_info->clk) {
		printk(KERN_ERR NAME
		       " Failed to allocate clk pin (PIO%d[%d])\n",
		       pio_info->pio[0].pio_port, pio_info->pio[0].pio_pin);
		return -ENODEV;
	}
	pio_info->sdout = stpio_request_set_pin(pio_info->pio[1].pio_port,
					    pio_info->pio[1].pio_pin,
						"SPI Data out", STPIO_BIDIR, 0);
	if (!pio_info->sdout) {
		printk(KERN_ERR NAME
		       " Failed to allocate sdo pin (PIO%d[%d])\n",
		       pio_info->pio[1].pio_port, pio_info->pio[1].pio_pin);
		return -ENODEV;
	}
	pio_info->sdin = stpio_request_pin(pio_info->pio[2].pio_port,
					   pio_info->pio[2].pio_pin,
					   "SPI Data in", STPIO_IN);
	if (!pio_info->sdin) {
		printk(KERN_ERR NAME
		       " Failed to allocate sdi pin (PIO%d[%d])\n",
		       pio_info->pio[2].pio_port, pio_info->pio[2].pio_pin);
		return -ENODEV;
	}

ssc_hard_wired:

	/* Disable I2C and Reset SSC */
	ssc_store32(st_ssc, SSC_I2C, 0x0);
	reg = ssc_load32(st_ssc, SSC_CTL);
	reg |= SSC_CTL_SR;
	ssc_store32(st_ssc, SSC_CTL, reg);

	udelay(1);
	reg = ssc_load32(st_ssc, SSC_CTL);
	reg &= ~SSC_CTL_SR;
	ssc_store32(st_ssc, SSC_CTL, reg);

	/* Set SSC into slave mode before reconfiguring PIO pins */
	reg = ssc_load32(st_ssc, SSC_CTL);
	reg &= ~SSC_CTL_MS;
	ssc_store32(st_ssc, SSC_CTL, reg);

	if (pio_info->pio[0].pio_port == SSC_NO_PIO)
		goto ssc_hard_wired2;

#ifdef CONFIG_CPU_SUBTYPE_STX7141
	stpio_configure_pin(pio_info->clk, STPIO_OUT);
	stpio_configure_pin(pio_info->sdout, STPIO_OUT);
	stpio_configure_pin(pio_info->sdin, STPIO_IN);
#else
	stpio_configure_pin(pio_info->clk, STPIO_ALT_OUT);
	stpio_configure_pin(pio_info->sdout, STPIO_ALT_OUT);
	stpio_configure_pin(pio_info->sdin, STPIO_IN);
#endif

ssc_hard_wired2:

	st_ssc->fcomms = clk_get_rate(clk_get(NULL, "comms_clk"));

	/* Start bitbang worker */
	if (spi_bitbang_start(&st_ssc->bitbang)) {
		printk(KERN_ERR NAME
		       " The SPI core refused the spi_stm_ssc adapter\n");
		return -ENODEV;
	}

	printk(KERN_INFO NAME ": Registered SPI Bus %d: "
	       "CLK[%d,%d] SDOUT[%d, %d] SDIN[%d, %d]\n", master->bus_num,
	       pio_info->pio[0].pio_port, pio_info->pio[0].pio_pin,
	       pio_info->pio[1].pio_port, pio_info->pio[1].pio_pin,
	       pio_info->pio[2].pio_port, pio_info->pio[2].pio_pin);

	return 0;
}
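
Note that the probe path above sizes its memory region with resource_size(); the original res->end - res->start is off by one, since a resource covers start..end inclusive. A short sketch of the request-and-remap idiom, with a hypothetical my_map helper:

#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Request and map a platform MEM resource; returns NULL on failure */
static void __iomem *my_map(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/* resource_size(res) == res->end - res->start + 1 */
	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return NULL;

	return devm_ioremap_nocache(&pdev->dev, res->start,
				    resource_size(res));
}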

static int spi_stm_remove(struct platform_device *pdev)
{
	struct spi_stm_ssc *st_ssc;
	struct spi_master *master;
	struct ssc_pio_t *pio_info =
		(struct ssc_pio_t *)pdev->dev.platform_data;

	master = platform_get_drvdata(pdev);
	st_ssc = spi_master_get_devdata(master);

	spi_bitbang_stop(&st_ssc->bitbang);

	if (pio_info->sdin) {
		stpio_free_pin(pio_info->sdin);
		stpio_free_pin(pio_info->clk);
		stpio_free_pin(pio_info->sdout);
	}

	return 0;
}

static struct platform_driver spi_hw_driver = {
	.driver.name = "spi_st_ssc",
	.driver.owner = THIS_MODULE,
	.probe = spi_stm_probe,
	.remove = spi_stm_remove,
};


static int __init spi_stm_ssc_init(void)
{
	printk(KERN_INFO NAME ": SSC SPI Driver\n");
	return platform_driver_register(&spi_hw_driver);
}

static void __exit spi_stm_ssc_exit(void)
{
	dgb_print("\n");
	platform_driver_unregister(&spi_hw_driver);
}

module_init(spi_stm_ssc_init);
module_exit(spi_stm_ssc_exit);

MODULE_AUTHOR("STMicroelectronics <www.st.com>");
MODULE_DESCRIPTION("STM SSC SPI driver");
MODULE_LICENSE("GPL");
static int32_t qpnp_iadc_configure(enum qpnp_iadc_channels channel,
					uint16_t *raw_code, uint32_t mode_sel)
{
	struct qpnp_iadc_drv *iadc = qpnp_iadc;
	u8 qpnp_iadc_mode_reg = 0, qpnp_iadc_ch_sel_reg = 0;
	u8 qpnp_iadc_conv_req = 0, qpnp_iadc_dig_param_reg = 0;
	u8 status1 = 0;
	uint32_t count = 0;
	int32_t rc = 0;

	qpnp_iadc_ch_sel_reg = channel;

	qpnp_iadc_dig_param_reg |= iadc->adc->amux_prop->decimation <<
					QPNP_IADC_DEC_RATIO_SEL;
	if (iadc->iadc_mode_sel)
		qpnp_iadc_mode_reg |= (QPNP_ADC_TRIM_EN | QPNP_VADC_SYNCH_EN);
	else
		qpnp_iadc_mode_reg |= QPNP_ADC_TRIM_EN;

	qpnp_iadc_conv_req = QPNP_IADC_CONV_REQ;

	rc = qpnp_iadc_write_reg(QPNP_IADC_MODE_CTL, qpnp_iadc_mode_reg);
	if (rc) {
		pr_err("qpnp adc mode ctl write failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_write_reg(QPNP_IADC_ADC_CH_SEL_CTL,
						qpnp_iadc_ch_sel_reg);
	if (rc) {
		pr_err("qpnp adc channel select write failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_write_reg(QPNP_ADC_DIG_PARAM,
						qpnp_iadc_dig_param_reg);
	if (rc) {
		pr_err("qpnp adc digital param write failed with %d\n", rc);
		return rc;
	}

	rc = qpnp_iadc_write_reg(QPNP_HW_SETTLE_DELAY,
				iadc->adc->amux_prop->hw_settle_time);
	if (rc < 0) {
		pr_err("qpnp adc configure error for hw settling time setup\n");
		return rc;
	}

	rc = qpnp_iadc_write_reg(QPNP_FAST_AVG_CTL,
					iadc->adc->amux_prop->fast_avg_setup);
	if (rc < 0) {
		pr_err("qpnp adc fast averaging configure error\n");
		return rc;
	}

	if (!iadc->iadc_poll_eoc)
		INIT_COMPLETION(iadc->adc->adc_rslt_completion);

	rc = qpnp_iadc_enable(true);
	if (rc)
		return rc;

	rc = qpnp_iadc_write_reg(QPNP_CONV_REQ, qpnp_iadc_conv_req);
	if (rc) {
		pr_err("qpnp adc conversion request write failed with %d\n", rc);
		return rc;
	}

	if (iadc->iadc_poll_eoc) {
		while (status1 != QPNP_STATUS1_EOC) {
			rc = qpnp_iadc_read_reg(QPNP_STATUS1, &status1);
			if (rc < 0)
				return rc;
			status1 &= QPNP_STATUS1_REQ_STS_EOC_MASK;
			usleep_range(QPNP_ADC_CONV_TIME_MIN,
					QPNP_ADC_CONV_TIME_MAX);
			count++;
			if (count > QPNP_ADC_ERR_COUNT) {
				pr_err("retry error exceeded\n");
				rc = qpnp_iadc_status_debug();
				if (rc < 0)
					pr_err("IADC status debug failed\n");
				rc = -EINVAL;
				return rc;
			}
		}
	} else {
		rc = wait_for_completion_timeout(
				&iadc->adc->adc_rslt_completion,
				QPNP_ADC_COMPLETION_TIMEOUT);
		if (!rc) {
			rc = qpnp_iadc_read_reg(QPNP_STATUS1, &status1);
			if (rc < 0)
				return rc;
			status1 &= QPNP_STATUS1_REQ_STS_EOC_MASK;
			if (status1 == QPNP_STATUS1_EOC)
				pr_debug("End of conversion status set\n");
			else {
				rc = qpnp_iadc_status_debug();
				if (rc < 0) {
					pr_err("status debug failed %d\n", rc);
					return rc;
				}
				return -EINVAL;
			}
		}
	}

	rc = qpnp_iadc_read_conversion_result(raw_code);
	if (rc) {
		pr_err("qpnp adc read conversion result failed with %d\n", rc);
		return rc;
	}

	return 0;
}
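
qpnp_iadc_configure() picks one of two end-of-conversion strategies: busy-polling STATUS1 with a bounded retry count, or sleeping on a completion that the EOC interrupt fires. A condensed sketch of both paths; the my_poll_eoc/my_wait_eoc helpers and the EOC bit value are illustrative assumptions, not this driver's API.

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_RETRY_MAX	100
#define MY_EOC_BIT	0x01	/* illustrative end-of-conversion bit */

/* Polling variant: bounded busy-wait on a status register */
static int my_poll_eoc(u8 (*read_status)(void))
{
	unsigned int count = 0;

	while (!(read_status() & MY_EOC_BIT)) {
		usleep_range(200, 250);
		if (++count > MY_RETRY_MAX)
			return -ETIMEDOUT;
	}
	return 0;
}

/* Interrupt variant: the completion must be re-armed before each request */
static int my_wait_eoc(struct completion *eoc, unsigned long timeout)
{
	return wait_for_completion_timeout(eoc, timeout) ? 0 : -ETIMEDOUT;
}
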
Exemplo n.º 23
int mdp4_lcdc_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;     /* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	vctrl->mfd->cached_reg_cnt = 0;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				pipe->mfd = vctrl->mfd;
				if (!wait || vctrl->base_pipe->ov_blt_addr)
					pipe->mfd->cache_reg_en = false;
				else
					pipe->mfd->cache_reg_en = true;
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	/*
	 * There is a possibility that a pipe_commit comes very close to the
	 * next vsync. This may cause two consecutive pipe_commits within the
	 * same vsync period, which causes an iommu page fault when the
	 * previous iommu buffer is freed. Set the ION_IOMMU_UNMAP_DELAYED
	 * flag at ion_map_iommu() to delay unmapping the iommu buffer and
	 * fix this problem.
	 * Also, ion_unmap_iommu() may take as long as 9 ms to free an ion
	 * buffer, so mdp4_overlay_iommu_unmap_freelist(mixer) should be
	 * called after stage_commit() to ensure pipe_commit (up to
	 * stage_commit) completes within the vsync period.
	 */

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			/*
			 * free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
		else
			mdp4_lcdc_wait4dmap(0);
	}

	return cnt;
}
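
The commit path above toggles vctrl->update_ndx between 0 and 1 under update_lock, so callers keep queueing pipes into one vsync_update list while the commit drains the other. A minimal sketch of that ping-pong swap, with hypothetical my_ctrl/my_update types:

#include <linux/mutex.h>

#define MY_PIPE_MAX	8

struct my_update {
	int update_cnt;			/* pipes queued in this list */
	int pipe[MY_PIPE_MAX];
};

struct my_ctrl {
	struct mutex update_lock;
	int update_ndx;			/* 0 or 1 */
	struct my_update vlist[2];
};

/* Grab the filled list and flip producers over to the other one */
static struct my_update *my_swap_vlist(struct my_ctrl *c)
{
	struct my_update *vp;

	mutex_lock(&c->update_lock);
	vp = &c->vlist[c->update_ndx];
	c->update_ndx++;
	c->update_ndx &= 0x01;		/* toggle 0 <-> 1 */
	vp->update_cnt = 0;		/* reset; entries drained via vp */
	mutex_unlock(&c->update_lock);
	return vp;
}
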
Exemplo n.º 24
int mdp4_dtv_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];
	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;
	mdp4_overlay_iommu_unmap_freelist(mixer);

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	mutex_unlock(&vctrl->update_lock);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/*
			 * free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}
	mdp4_mixer_stage_commit(mixer);

	 /* start timing generator & mmu if they are not started yet */
	mdp4_overlay_dtv_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dtv_blt_ov_update(pipe);
		pipe->blt_ov_done++;
		vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
		mb();
		pipe->blt_ov_koff++;
		/* kickoff overlay1 engine */
		mdp4_stat.kickoff_ov1++;
		outpdw(MDP_BASE + 0x0008, 0);
	} else {
		/* schedule second phase update at dmae */
		INIT_COMPLETION(vctrl->dmae_comp);
		vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait)
		mdp4_dtv_wait4dmae(cndx);

	return cnt;
}
Exemplo n.º 25
int mdp4_dsi_cmd_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int need_dmap_wait = 0;
	int need_ov_wait = 0;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;     /* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}

	if (mdp4_dsi_cmd_clk_check(vctrl) < 0) {
		mdp4_dsi_cmd_pipe_clean(vp);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		/* Blt */
		if (vctrl->blt_wait) {
			INIT_COMPLETION(vctrl->dmap_comp);
			need_dmap_wait = 1;
		}
		if (vctrl->ov_koff != vctrl->ov_done) {
			INIT_COMPLETION(vctrl->ov_comp);
			need_ov_wait = 1;
		}
	} else {
		/* direct out */
		if (vctrl->dmap_koff != vctrl->dmap_done) {
			INIT_COMPLETION(vctrl->dmap_comp);
			pr_debug("%s: wait, ok=%d od=%d dk=%d dd=%d cpu=%d\n",
			 __func__, vctrl->ov_koff, vctrl->ov_done,
			vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id());
			need_dmap_wait = 1;
		}
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (need_dmap_wait) {
		pr_debug("%s: wait4dmap\n", __func__);
		mdp4_dsi_cmd_wait4dmap(0);
	}

	if (need_ov_wait) {
		pr_debug("%s: wait4ov\n", __func__);
		mdp4_dsi_cmd_wait4ov(0);
	}

	if (pipe->ov_blt_addr) {
		if (vctrl->blt_end) {
			vctrl->blt_end = 0;
			pipe->ov_blt_addr = 0;
			pipe->dma_blt_addr =  0;
		}
	}

	if (vctrl->blt_change) {
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		vctrl->blt_change = 0;
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/*
			 * free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	/* tx dcs command if had any */
	mipi_dsi_cmdlist_commit(1);

	mdp4_mixer_stage_commit(mixer);

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dsi_cmd_blt_ov_update(pipe);
		pipe->ov_cnt++;
		vctrl->ov_koff++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	} else {
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		vctrl->dmap_koff++;
	}
	pr_debug("%s: kickoff, pid=%d\n", __func__, current->pid);
	/* kickoff overlay engine */
	mdp4_stat.kickoff_ov0++;
	mdp_pipe_kickoff_simplified(MDP_OVERLAY0_TERM);
	mb(); /* make sure the kickoff is executed */
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait)
		mdp4_dsi_cmd_wait4vsync(0);

	return cnt;
}
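
Both branches above lean on paired counters: a kickoff bumps ov_koff or dmap_koff, the matching done-interrupt bumps *_done, and a commit must wait whenever the pair differs. A small sketch of that bookkeeping, with a hypothetical my_vctrl type:

#include <linux/completion.h>
#include <linux/spinlock.h>

struct my_vctrl {
	spinlock_t lock;
	int koff;			/* kickoffs issued */
	int done;			/* done interrupts seen */
	struct completion comp;
};

/* Commit path: re-arm and report whether a kickoff is still in flight */
static bool my_need_wait(struct my_vctrl *v)
{
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&v->lock, flags);
	pending = (v->koff != v->done);
	if (pending)
		init_completion(&v->comp);
	spin_unlock_irqrestore(&v->lock, flags);
	return pending;
}

/* Done interrupt: balance the counter and wake any waiter */
static void my_done_irq(struct my_vctrl *v)
{
	spin_lock(&v->lock);
	v->done++;
	complete(&v->comp);
	spin_unlock(&v->lock);
}
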
Exemplo n.º 26
void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
{
	unsigned long flag;
	int need_wait = 0;
	int wait_cnt = 0;
#ifdef MDP_HANG_DEBUG
	mipi_dsi_cmd_mode_on = 1;
#endif

	if (dsi_clock_timer.function) {
#ifndef MDP_HANG_DEBUG
		if (time_after(jiffies, tout_expired)) {
#endif
			tout_expired = jiffies + TOUT_PERIOD;
			mod_timer(&dsi_clock_timer, tout_expired);
#ifndef MDP_HANG_DEBUG
			tout_expired -= MS_100;
		}
#endif
	}

	pr_debug("%s: start pid=%d dsi_clk_on=%d\n",
			__func__, current->pid, mipi_dsi_clk_on);

	/* start dsi clock if necessary */
	spin_lock_bh(&dsi_clk_lock);
	if (mipi_dsi_clk_on == 0)
		mipi_dsi_turn_on_clks();
	spin_unlock_bh(&dsi_clk_lock);
/*
 * QCT_PATCH-sbyun: to avoid a conflict between the ongoing video image and
 * a new overlay image, add some delay when the mdp dma is busy.
 */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		if (busy_wait_cnt == 0)
			INIT_COMPLETION(mfd->dma->comp);
		busy_wait_cnt++;
		need_wait++;
#if defined(DEBUG_MDP_LOCKUP)
		mdp4_stat.dma_wait_start_time = jiffies;
#endif
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (need_wait) {
		/* wait until DMA finishes the current job */
		pr_debug("%s: pending pid=%d dsi_clk_on=%d\n",
				__func__, current->pid, mipi_dsi_clk_on);
#ifdef MDP_HANG_DEBUG
		if (!wait_for_completion_timeout(&mfd->dma->comp,
				msecs_to_jiffies(VSYNC_PERIOD * 30))) {
			/*
			 * No interrupt received from the MDP;
			 * something is wrong.
			 */
			dump_mdp_registers();
			panic("mdp interrupt missing");
		}

#else
		wait_for_completion(&mfd->dma->comp);
#endif

	}

#ifdef MDP_HANG_DEBUG
	mipi_dsi_cmd_mode_on = 0;
#endif
	pr_debug("%s: done pid=%d dsi_clk_on=%d\n",
			 __func__, current->pid, mipi_dsi_clk_on);
}
Exemplo n.º 27
static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
					struct dsi_buf *tp)
{
	int len, ret = 0;
	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
	char *bp;
	unsigned long size, addr;

	bp = tp->data;

	len = ALIGN(tp->len, 4);
	size = ALIGN(tp->len, SZ_4K);

	if (is_mdss_iommu_attached()) {
		ret = msm_iommu_map_contig_buffer(tp->dmap,
					mdss_get_iommu_domain(domain), 0,
					size, SZ_4K, 0, &addr);
		if (IS_ERR_VALUE(ret)) {
			pr_err("unable to map dma memory to iommu(%d)\n", ret);
			return -ENOMEM;
		}
	} else {
		addr = tp->dmap;
	}

	INIT_COMPLETION(ctrl->dma_comp);

	if (ctrl->shared_pdata.broadcast_enable &&
	    (ctrl->ndx == DSI_CTRL_1) && (left_ctrl_pdata != NULL)) {
		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x048, addr);
		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x04c, len);
	}

	MIPI_OUTP((ctrl->ctrl_base) + 0x048, addr);
	MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
	wmb();

	if (ctrl->shared_pdata.broadcast_enable &&
	    (ctrl->ndx == DSI_CTRL_1) && (left_ctrl_pdata != NULL))
		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x090, 0x01);

	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);	/* trigger */
	wmb();

	ret = wait_for_completion_timeout(&ctrl->dma_comp,
				msecs_to_jiffies(DMA_TX_TIMEOUT));
	if (ret == 0)
		ret = -ETIMEDOUT;
	else
		ret = tp->len;

	if (is_mdss_iommu_attached())
		msm_iommu_unmap_contig_buffer(addr,
			mdss_get_iommu_domain(domain), 0, size);

	return ret;
}
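
The tail of mdss_dsi_cmd_dma_tx() relies on wait_for_completion_timeout() returning 0 on timeout and the remaining jiffies otherwise, folding that into either -ETIMEDOUT or the number of bytes handed to the DMA engine. The same idiom in isolation, with a hypothetical my_wait_dma helper:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Returns len on success, -ETIMEDOUT if the DMA-done IRQ never fired */
static int my_wait_dma(struct completion *comp, int len, unsigned int ms)
{
	unsigned long left;

	left = wait_for_completion_timeout(comp, msecs_to_jiffies(ms));
	return left ? len : -ETIMEDOUT;
}
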
Exemplo n.º 28
static int process_crypt_req(struct tegra_crypto_ctx *ctx,
			     struct tegra_crypt_req *crypt_req)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req = NULL;
	struct scatterlist in_sg;
	struct scatterlist out_sg;
	unsigned long *xbuf[NBUFS];
	int ret = 0, size = 0;
	unsigned long total = 0;
	struct tegra_crypto_completion tcrypt_complete;

	if (crypt_req->op & TEGRA_CRYPTO_ECB) {
		req = ablkcipher_request_alloc(ctx->ecb_tfm, GFP_KERNEL);
		tfm = ctx->ecb_tfm;
	} else {
		req = ablkcipher_request_alloc(ctx->cbc_tfm, GFP_KERNEL);
		tfm = ctx->cbc_tfm;
	}
	if (!req) {
		pr_err("%s: Failed to allocate request\n", __func__);
		return -ENOMEM;
	}

	if ((crypt_req->keylen < 0) || (crypt_req->keylen > AES_MAX_KEY_SIZE)) {
		ret = -EINVAL;
		goto process_req_out;
	}

	crypto_ablkcipher_clear_flags(tfm, ~0);

	if (!ctx->use_ssk) {
		ret = crypto_ablkcipher_setkey(tfm, crypt_req->key,
			crypt_req->keylen);
		if (ret < 0) {
			pr_err("setkey failed\n");
			goto process_req_out;
		}
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed\n");
		goto process_req_out;
	}

	init_completion(&tcrypt_complete.restart);

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
		tegra_crypt_complete, &tcrypt_complete);

	total = crypt_req->plaintext_sz;
	while (total > 0) {
		size = min(total, PAGE_SIZE);
		ret = copy_from_user((void *)xbuf[0],
			(void __user *)crypt_req->plaintext, size);
		if (ret) {
			ret = -EFAULT;
			pr_debug("%s: copy_from_user failed\n", __func__);
			goto process_req_buf_out;
		}
		sg_init_one(&in_sg, xbuf[0], size);
		sg_init_one(&out_sg, xbuf[1], size);

		ablkcipher_request_set_crypt(req, &in_sg,
			&out_sg, size, crypt_req->iv);

		INIT_COMPLETION(tcrypt_complete.restart);
		tcrypt_complete.req_err = 0;
		ret = crypt_req->encrypt ?
			crypto_ablkcipher_encrypt(req) :
			crypto_ablkcipher_decrypt(req);

		if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
			/* crypto driver is asynchronous */
			ret = wait_for_completion_interruptible(&tcrypt_complete.restart);

			if (ret < 0)
				goto process_req_buf_out;

			if (tcrypt_complete.req_err < 0) {
				ret = tcrypt_complete.req_err;
				goto process_req_buf_out;
			}
		} else if (ret < 0) {
			pr_debug("%scrypt failed (%d)\n",
				crypt_req->encrypt ? "en" : "de", ret);
			goto process_req_buf_out;
		}

		ret = copy_to_user((void __user *)crypt_req->result,
			(const void *)xbuf[1], size);
		if (ret) {
			ret = -EFAULT;
			goto process_req_buf_out;
		}

		total -= size;
		crypt_req->result += size;
		crypt_req->plaintext += size;
	}

process_req_buf_out:
	free_bufs(xbuf);
process_req_out:
	ablkcipher_request_free(req);

	return ret;
}
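
process_crypt_req() follows the asynchronous ablkcipher contract: -EINPROGRESS or -EBUSY means the driver will invoke the request's callback later, so the submitter parks on a completion that the callback fires. The callback itself (tegra_crypt_complete) is not shown in this excerpt; below is a sketch of what such a callback typically looks like, with the struct layout assumed from how tcrypt_complete is used above.

#include <linux/completion.h>
#include <linux/crypto.h>

/* Layout assumed from the usage above */
struct tegra_crypto_completion {
	struct completion restart;
	int req_err;
};

/* Async callback: record the result and wake the submitter */
static void my_crypt_complete(struct crypto_async_request *req, int err)
{
	struct tegra_crypto_completion *done = req->data;

	if (err != -EINPROGRESS) {
		done->req_err = err;
		complete(&done->restart);
	}
}
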
Exemplo n.º 29
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %zu byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}
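
Rather than sleeping, the OneNAND path above busy-polls the completion's done flag under a 20 ms jiffies deadline, since panic_write() can run in contexts where sleeping is not allowed. A minimal sketch of such a bounded poll, using try_wait_for_completion() instead of peeking at the done counter directly (my_poll_done is a hypothetical helper):

#include <linux/completion.h>
#include <linux/jiffies.h>

/* Spin until the completion fires or the deadline passes; never sleeps */
static bool my_poll_done(struct completion *c, unsigned int ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(ms);

	while (time_before(jiffies, deadline)) {
		if (try_wait_for_completion(c))
			return true;
		cpu_relax();
	}
	return false;
}
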
/*
 * rtc6213n_set_seek - set seek
 */
static int rtc6213n_set_seek(struct rtc6213n_device *radio,
	unsigned int seek_wrap, unsigned int seek_up)
{
	int retval = 0;
	unsigned long timeout;
	bool timed_out = false;

	dev_info(&radio->videodev->dev, "========= rtc6213n_set_seek ==========\n");
	dev_info(&radio->videodev->dev, "RTC6213n seeking process is starting\n");
	dev_info(&radio->videodev->dev, "CHANNEL=0x%4.4hx SEEKCFG1=0x%4.4hx STATUS=0x%4.4hx\n",
		radio->registers[CHANNEL], radio->registers[SEEKCFG1],
		radio->registers[STATUS]);

	if (seek_wrap)
		radio->registers[SEEKCFG1] &= ~SEEKCFG1_CSR0_SKMODE;
	else
		radio->registers[SEEKCFG1] |= SEEKCFG1_CSR0_SKMODE;

	if (seek_up)
		radio->registers[SEEKCFG1] |= SEEKCFG1_CSR0_SEEKUP;
	else
		radio->registers[SEEKCFG1] &= ~SEEKCFG1_CSR0_SEEKUP;

	retval = rtc6213n_set_register(radio, SEEKCFG1);
	if (retval < 0)
		goto done;

	/* start seeking */
	radio->registers[SEEKCFG1] |= SEEKCFG1_CSR0_SEEK;

	retval = rtc6213n_set_register(radio, SEEKCFG1);
	if (retval < 0)
		goto done;

	/* currently I2C driver only uses interrupt way to seek */
	if (radio->stci_enabled) {
		INIT_COMPLETION(radio->completion);

		/* wait till seek operation has completed */
		retval = wait_for_completion_timeout(&radio->completion,
			msecs_to_jiffies(seek_timeout));
		if (!retval) {
			dev_info(&radio->videodev->dev, "rtc6213n_set_seek : timed_out\n");
			timed_out = true;
		}
	} else	{
		/* wait till seek operation has completed */
		timeout = jiffies + msecs_to_jiffies(seek_timeout);
		do {
			retval = rtc6213n_get_register(radio, STATUS);
			if (retval < 0)
				goto stop;
			timed_out = time_after(jiffies, timeout);
		} while (((radio->registers[STATUS] & STATUS_STD) == 0) &&
			(!timed_out));
	}
	dev_info(&radio->videodev->dev, "RTC6213n seeking process is done\n");
	dev_info(&radio->videodev->dev, "CHANNEL=0x%4.4hx SEEKCFG1=0x%4.4hx STATUS=0x%4.4hx, STD = %d, SF = %d, RSSI = %d\n",
		radio->registers[CHANNEL], radio->registers[SEEKCFG1],
		radio->registers[STATUS],
		(radio->registers[STATUS] & STATUS_STD) >> 14,
		(radio->registers[STATUS] & STATUS_SF) >> 13,
		(radio->registers[RSSI] & RSSI_RSSI));

	if ((radio->registers[STATUS] & STATUS_STD) == 0)
		dev_info(&radio->videodev->dev, "seek does not complete\n");
	/*if (radio->registers[STATUS] & STATUS_SF)
		dev_info(&radio->videodev->dev,
			"seek failed / band limit reached\n");*/
	if (timed_out)
		dev_info(&radio->videodev->dev, "seek timed out after %u ms\n",
			seek_timeout);

stop:
	/* stop seeking : clear STD*/
	radio->registers[SEEKCFG1] &= ~SEEKCFG1_CSR0_SEEK;
	retval = rtc6213n_set_register(radio, SEEKCFG1);

done:
	if (radio->registers[STATUS] & STATUS_SF) {
		dev_info(&radio->videodev->dev, "seek failed / band limit reached\n");
		retval = -ESPIPE;
	}
	/* try again, if timed out */
	else if ((retval == 0) && timed_out)
		retval = -EAGAIN;
	dev_info(&radio->videodev->dev, "========= rtc6213n_set_seek End ==========\n\n");

	return retval;
}