int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
{
	struct io_mgr *pio_mgr;
	struct dev_object *dev_obj;
	unsigned long flags;

	dev_obj = dev_get_first();
	dev_get_io_mgr(dev_obj, &pio_mgr);

	if (!pio_mgr)
		return NOTIFY_BAD;

	pio_mgr->intr_val = (u16)(unsigned long)msg;
	if (pio_mgr->intr_val & MBX_PM_CLASS)
		io_dispatch_pm(pio_mgr);

	if (pio_mgr->intr_val == MBX_DEH_RESET) {
		pio_mgr->intr_val = 0;
	} else {
		spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
		pio_mgr->dpc_req++;
		spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
		tasklet_schedule(&pio_mgr->dpc_tasklet);
	}
	return NOTIFY_OK;
}
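
This handler runs in interrupt context: it latches the mailbox payload and defers the real work to the DPC tasklet. For reference, a minimal sketch of how it is attached to the OMAP mailbox receive queue, mirroring the registration that bridge_brd_start() performs further below (error handling elided; the function-pointer cast follows the driver's own usage):

	/* Sketch: acquire the DSP mailbox and install io_mbox_msg() as the
	 * RX callback, as bridge_brd_start() does later in this section. */
	dev_context->mbox = omap_mbox_get("dsp");
	if (IS_ERR(dev_context->mbox))
		return PTR_ERR(dev_context->mbox);
	/* Each received mailbox word arrives as the 'msg' argument above */
	dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
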
/*
 *  ======== bridge_msg_create ========
 *      Create an object to manage message queues. Only one of these objects
 *      can exist per device object.
 */
int bridge_msg_create(struct msg_mgr **msg_man,
			     struct dev_object *hdev_obj,
			     msg_onexit msg_callback)
{
	struct msg_mgr *msg_mgr_obj;
	struct io_mgr *hio_mgr;

	if (!msg_man || !msg_callback || !hdev_obj)
		return -EFAULT;

	dev_get_io_mgr(hdev_obj, &hio_mgr);
	if (!hio_mgr)
		return -EFAULT;

	*msg_man = NULL;
	/* Allocate msg_ctrl manager object */
	msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
	if (!msg_mgr_obj)
		return -ENOMEM;

	msg_mgr_obj->on_exit = msg_callback;
	msg_mgr_obj->iomgr = hio_mgr;
	/* List of MSG_QUEUEs */
	INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
	/*
	 * Queues of message frames for messages to the DSP. Message
	 * frames will only be added to the free queue when a
	 * msg_queue object is created.
	 */
	INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
	INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
	spin_lock_init(&msg_mgr_obj->msg_mgr_lock);

	/*
	 * Create an event to be used by bridge_msg_put() in waiting
	 * for an available free frame from the message manager.
	 */
	msg_mgr_obj->sync_event =
		kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_mgr_obj->sync_event) {
		kfree(msg_mgr_obj);
		return -ENOMEM;
	}
	sync_init_event(msg_mgr_obj->sync_event);

	*msg_man = msg_mgr_obj;

	return 0;
}
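
A hedged usage sketch follows: the msg_onexit callback signature (a handle plus an exit status) is an assumption for illustration, and delete_msg_mgr() is the cleanup counterpart invoked by the older variant shown next.

	/* Sketch, not driver code: create a message manager for a device
	 * object. The callback signature below is an assumption. */
	static void example_on_exit(void *h, s32 node_status)
	{
		/* hypothetical: tell the node its DSP-side peer exited */
	}

	static int example_setup(struct dev_object *hdev_obj)
	{
		struct msg_mgr *msg_mgr;
		int status;

		status = bridge_msg_create(&msg_mgr, hdev_obj, example_on_exit);
		if (status)
			return status;	/* -EFAULT or -ENOMEM, per the paths above */

		/* msg_free_list stays empty until a msg_queue is created,
		 * as the comment above notes */
		return 0;
	}
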
Example #3
/*
 *  ======== bridge_msg_create ========
 *      Create an object to manage message queues. Only one of these objects
 *      can exist per device object.
 */
int bridge_msg_create(OUT struct msg_mgr **phMsgMgr,
			     struct dev_object *hdev_obj,
			     msg_onexit msgCallback)
{
	struct msg_mgr *msg_mgr_obj;
	struct io_mgr *hio_mgr;
	int status = 0;

	if (!phMsgMgr || !msgCallback || !hdev_obj) {
		status = -EFAULT;
		goto func_end;
	}
	dev_get_io_mgr(hdev_obj, &hio_mgr);
	if (!hio_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	*phMsgMgr = NULL;
	/* Allocate msg_ctrl manager object */
	msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);

	if (msg_mgr_obj) {
		msg_mgr_obj->on_exit = msgCallback;
		msg_mgr_obj->hio_mgr = hio_mgr;
		/* List of MSG_QUEUEs */
		msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list),
							GFP_KERNEL);
		/*
		 * Queues of message frames for messages to the DSP. Message
		 * frames will only be added to the free queue when a
		 * msg_queue object is created.
		 */
		msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list),
							GFP_KERNEL);
		msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list),
							GFP_KERNEL);
		if (msg_mgr_obj->queue_list == NULL ||
		    msg_mgr_obj->msg_free_list == NULL ||
		    msg_mgr_obj->msg_used_list == NULL) {
			status = -ENOMEM;
		} else {
			INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
			INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
			INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
			spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
		}

		/*
		 * Create an event to be used by bridge_msg_put() in waiting
		 * for an available free frame from the message manager.
		 */
		msg_mgr_obj->sync_event =
				kzalloc(sizeof(struct sync_object), GFP_KERNEL);
		if (!msg_mgr_obj->sync_event)
			status = -ENOMEM;
		else
			sync_init_event(msg_mgr_obj->sync_event);

		if (DSP_SUCCEEDED(status))
			*phMsgMgr = msg_mgr_obj;
		else
			delete_msg_mgr(msg_mgr_obj);

	} else {
		status = -ENOMEM;
	}
func_end:
	return status;
}
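
delete_msg_mgr() is invoked on the failure path above but is not shown in this excerpt. A plausible reconstruction for this lst_list-based variant, releasing exactly what bridge_msg_create() allocated (an illustrative sketch, not the verbatim driver function):

	static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
	{
		if (!hmsg_mgr)
			return;

		/* Free the three list containers and the sync event; on the
		 * create-failure path the lists are still empty, and
		 * kfree(NULL) is a no-op for anything left unallocated. */
		kfree(hmsg_mgr->queue_list);
		kfree(hmsg_mgr->msg_free_list);
		kfree(hmsg_mgr->msg_used_list);
		kfree(hmsg_mgr->sync_event);
		kfree(hmsg_mgr);
	}
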
Example #4
/*
 *  ======== handle_hibernation_from_dsp ========
 *      Handle hibernation requested by the DSP
 */
int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
{
	int status = 0;
#ifdef CONFIG_PM
	u16 timeout = PWRSTST_TIMEOUT / 10;
	u32 pwr_state;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 opplevel;
	struct io_mgr *hio_mgr;
#endif
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
						OMAP_POWERSTATEST_MASK;
	/* Wait for DSP to move into OFF state */
	while ((pwr_state != PWRDM_POWER_OFF) && --timeout) {
		if (msleep_interruptible(10)) {
			pr_err("Waiting for DSP OFF mode interrupted\n");
			return -EPERM;
		}
		pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
	}
	if (timeout == 0) {
		pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
		return -ETIMEDOUT;
	} else {

		/* Save mailbox settings */
		omap_mbox_save_ctx(dev_context->mbox);

		/* Turn off DSP Peripheral clocks and DSP Load monitor timer */
		status = dsp_clock_disable_all(dev_context->dsp_per_clks);

		/* Disable wdt on hibernation. */
		dsp_wdt_enable(false);

		if (!status) {
			/* Update the Bridge driver state */
			dev_context->brd_state = BRD_DSP_HIBERNATION;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
			status =
			    dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
			if (!hio_mgr) {
				status = -EFAULT;
				return status;
			}
			io_sh_msetting(hio_mgr, SHM_GETOPP, &opplevel);

			/*
			 * Set the OPP to low level before moving to OFF
			 * mode
			 */
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP1);
			status = 0;
#endif /* CONFIG_TIDSPBRIDGE_DVFS */
		}
	}
#endif
	return status;
}
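
The register poll above follows a common wait-for-power-state pattern. As a reusable sketch (the helper name is illustrative, assuming the same dsp_prm_read hook and constants used above):

	/* Sketch: poll the IVA2 power-state register until it reaches
	 * target_state or the poll budget runs out (~10 ms per step). */
	static int wait_for_iva2_pwr_state(struct omap_dsp_platform_data *pdata,
					   u32 target_state)
	{
		u16 timeout = PWRSTST_TIMEOUT / 10;
		u32 pwr_state;

		pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
				OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
		while (pwr_state != target_state && --timeout) {
			if (msleep_interruptible(10))
				return -EPERM;	/* interrupted by a signal */
			pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
		}
		return timeout ? 0 : -ETIMEDOUT;
	}
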
Example #5
/*
 *  ======== bridge_brd_start ========
 *  purpose:
 *      Initializes the DSP MMU and starts the DSP.
 *
 *  Preconditions:
 *  a) DSP domain is 'ACTIVE'.
 *  b) DSP_RST1 is asserted.
 *  c) DSP_RST2 is released.
 */
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
				   u32 dsp_addr)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct iommu *mmu = NULL;
	struct shm_segs *sm_sg;
	int l4_i = 0, tlb_i = 0;
	u32 sg0_da = 0, sg1_da = 0;
	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
	u32 dw_sync_addr = 0;
	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
	/* Offset of shm_base_virt from tlb_base_virt */
	u32 ul_shm_offset_virt;
	struct cfg_hostres *resources = NULL;
	u32 temp;
	u32 ul_dsp_clk_rate;
	u32 ul_dsp_clk_addr;
	u32 ul_bios_gp_timer;
	u32 clk_cmd;
	struct io_mgr *hio_mgr;
	u32 ul_load_monitor_timer;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* The device context contains all the mmu setup info from when the
	 * last dsp base image was loaded. The first entry is always
	 * SHMMEM base. */
	/* Get SHM_BEG - convert to byte address */
	(void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
			     &ul_shm_base_virt);
	ul_shm_base_virt *= DSPWORDSIZE;
	DBC_ASSERT(ul_shm_base_virt != 0);
	/* DSP Virtual address */
	ul_tlb_base_virt = dev_context->sh_s.seg0_da;
	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
	ul_shm_offset_virt =
	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
	/* Kernel logical address */
	ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;

	DBC_ASSERT(ul_shm_base != 0);
	/* 2nd wd is used as sync field */
	dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
	/* Write a signature into the shm base + offset; this will
	 * get cleared when the DSP program starts. */
	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
		pr_err("%s: Illegal SM base\n", __func__);
		status = -EPERM;
	} else
		__raw_writel(0xffffffff, dw_sync_addr);

	if (!status) {
		resources = dev_context->resources;
		if (!resources)
			status = -EPERM;

		/* Assert RST1, i.e. reset only the DSP megacell */
		if (!status) {
			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
					OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
					OMAP2_RM_RSTCTRL);
			/* Mask address with 1K for compatibility */
			__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTADDR));
			/*
			 * Set bootmode to self loop if dsp_debug flag is true
			 */
			__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTMOD));
		}
	}

	if (!status) {
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		mmu = dev_context->dsp_mmu;
		if (mmu)
			dsp_mmu_exit(mmu);
		mmu = dsp_mmu_init();
		if (IS_ERR(mmu)) {
			dev_err(bridge, "dsp_mmu_init failed!\n");
			dev_context->dsp_mmu = NULL;
			status = PTR_ERR(mmu);
		}
	}
	if (!status) {
		dev_context->dsp_mmu = mmu;
		sm_sg = &dev_context->sh_s;
		sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
			sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
		if (IS_ERR_VALUE(sg0_da)) {
			status = (int)sg0_da;
			sg0_da = 0;
		}
	}
	if (!status) {
		sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
			sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
		if (IS_ERR_VALUE(sg1_da)) {
			status = (int)sg1_da;
			sg1_da = 0;
		}
	}
	if (!status) {
		u32 da;
		for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
			if (!tlb[tlb_i].ul_gpp_pa)
				continue;

			dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
				" 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
				tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);

			da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
				tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
			if (IS_ERR_VALUE(da)) {
				status = (int)da;
				break;
			}
		}
	}
	if (!status) {
		u32 da;
		l4_i = 0;
		while (l4_peripheral_table[l4_i].phys_addr) {
			da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
				dsp_virt_addr, l4_peripheral_table[l4_i].
				phys_addr, PAGE_SIZE,
				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
			if (IS_ERR_VALUE(da)) {
				status = (int)da;
				break;
			}
			l4_i++;
		}
	}

	/* Lock the above TLB entries and get the BIOS and load monitor timer
	 * information */
	if (!status) {
		/* Enable the BIOS clock */
		(void)dev_get_symbol(dev_context->hdev_obj,
				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
		(void)dev_get_symbol(dev_context->hdev_obj,
				     BRIDGEINIT_LOADMON_GPTIMER,
				     &ul_load_monitor_timer);

		if (ul_load_monitor_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_load_monitor_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge, "Not able to get the symbol for Load "
				"Monitor Timer\n");
		}

		if (ul_bios_gp_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_bios_gp_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge,
				"Not able to get the symbol for BIOS Timer\n");
		}

		/* Set the DSP clock rate */
		(void)dev_get_symbol(dev_context->hdev_obj,
				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
		/* Set Autoidle mode for IVA2 PLL */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		if (ul_dsp_clk_addr != 0) {
			/* Get the clock rate */
			ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
			dev_dbg(bridge, "%s: DSP clock rate (KHz): 0x%x\n",
				__func__, ul_dsp_clk_rate);
			(void)bridge_brd_write(dev_context,
					       (u8 *) &ul_dsp_clk_rate,
					       ul_dsp_clk_addr, sizeof(u32), 0);
		}
		/*
		 * Enable Mailbox events and also drain any pending
		 * stale messages.
		 */
		dev_context->mbox = omap_mbox_get("dsp");
		if (IS_ERR(dev_context->mbox)) {
			dev_context->mbox = NULL;
			pr_err("%s: Failed to get dsp mailbox handle\n",
								__func__);
			status = -EPERM;
		}

	}
	if (!status) {
		dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;

		/* PM_IVA2GRPSEL_PER = 0xC0 */
		temp = readl(resources->dw_per_pm_base + 0xA8);
		temp = (temp & 0xFFFFFF30) | 0xC0;
		writel(temp, resources->dw_per_pm_base + 0xA8);

		/* PM_MPUGRPSEL_PER &= 0xFFFFFF3F */
		temp = readl(resources->dw_per_pm_base + 0xA4);
		temp = (temp & 0xFFFFFF3F);
		writel(temp, resources->dw_per_pm_base + 0xA4);
		/* CM_SLEEPDEP_PER |= 0x04 */
		temp = readl(resources->dw_per_base + 0x44);
		temp = (temp & 0xFFFFFFFB) | 0x04;
		writel(temp, resources->dw_per_base + 0x44);

		/* CM_CLKSTCTRL_IVA2 = 0x00000003 - allow automatic transitions */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Let DSP go */
		dev_dbg(bridge, "%s Unreset\n", __func__);
		/* release the RST1, DSP starts executing now .. */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

		dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
		dev_dbg(bridge, "DSP c_int00 Address =  0x%x\n", dsp_addr);
		if (dsp_debug)
			while (__raw_readw(dw_sync_addr))
				;

		/* Wait for DSP to clear word in shared memory */
		/* Read the Location */
		if (!wait_for_start(dev_context, dw_sync_addr))
			status = -ETIMEDOUT;

		/* Start wdt */
		dsp_wdt_sm_set((void *)ul_shm_base);
		dsp_wdt_enable(true);

		status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
		if (hio_mgr) {
			io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
			/* Write the synchronization bit to indicate the
			 * completion of OPP table update to DSP
			 */
			__raw_writel(0xCAFECAFE, dw_sync_addr);

			/* update board state */
			dev_context->dw_brd_state = BRD_RUNNING;
			return 0;
		} else {
			dev_context->dw_brd_state = BRD_UNKNOWN;
		}
	}

	while (tlb_i--) {
		if (!tlb[tlb_i].ul_gpp_pa)
			continue;
		iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
	}
	while (l4_i--)
		iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
	if (sg0_da)
		iommu_kunmap(mmu, sg0_da);
	if (sg1_da)
		iommu_kunmap(mmu, sg1_da);
	return status;
}
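
wait_for_start(), used above to detect DSP boot, polls the sync word that bridge_brd_start() seeded with 0xffffffff until the DSP's startup code clears it. A minimal sketch under that assumption (the poll budget is illustrative; the signature matches the call site above):

	static bool wait_for_start(struct bridge_dev_context *dev_context,
				   u32 dw_sync_addr)
	{
		u16 timeout = 10000;	/* assumed budget: ~10 us per poll */

		/* The DSP's c_int00 entry code zeroes the sync word once
		 * it is running */
		while (__raw_readw(dw_sync_addr) && --timeout)
			udelay(10);

		return timeout != 0;
	}
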