/*
 * This sysfs attribute reports the two MPU address offsets needed by the
 * INST2 utility; the inst_log script reads it.
 */
static ssize_t mpu_address_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct wmd_dev_context *dw_context = NULL;
	struct dev_object *hdev_obj = NULL;
	u32 mem_poolsize = 0;
	u32 gpp_pa = 0, dsp_va = 0;
	u32 arm_phys_mem_off_uncached = 0;
	struct dspbridge_platform_data *pdata = bridge->platform_data;

	hdev_obj = (struct dev_object *)drv_get_first_dev_object();
	dev_get_wmd_context(hdev_obj, &dw_context);
	if (!dw_context) {
		pr_err("%s: failed to get the dev context handle\n", __func__);
		return 0;
	}
	gpp_pa = dw_context->atlb_entry[0].ul_gpp_pa;
	dsp_va = dw_context->atlb_entry[0].ul_dsp_va;

	/*
	 * The physical address offset; this offset is a fixed value for a
	 * given platform.
	 */
	arm_phys_mem_off_uncached = gpp_pa - dsp_va;

	/*
	 * The offset of the shared memory pool from the cached region base
	 * (0x20000000) in the DSP address space.
	 */
	mem_poolsize = pdata->phys_mempool_base - 0x20000000;

	/* Return the two offsets computed above */
	return sprintf(buf, "mempoolsizeOffset 0x%x GppPaOffset 0x%x\n",
		       mem_poolsize, arm_phys_mem_off_uncached);
}
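
/*
 * A minimal sketch (not taken from the driver's actual init path) of how the
 * show handler above could be exposed: the attribute definition and the
 * probe-time registration below are illustrative assumptions.
 */
static DEVICE_ATTR(mpu_address, S_IRUGO, mpu_address_show, NULL);

/*
 * In the platform probe, something like:
 *
 *	if (device_create_file(&pdev->dev, &dev_attr_mpu_address))
 *		dev_err(&pdev->dev, "mpu_address attr creation failed\n");
 */
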
static ssize_t wdt3_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t n)
{
	u32 wdt3;
	struct dev_object *dev_object;
	struct wmd_dev_context *dev_ctxt;

	if (sscanf(buf, "%u", &wdt3) != 1)
		return -EINVAL;

	dev_object = dev_get_first();
	if (dev_object == NULL)
		goto func_end;
	dev_get_wmd_context(dev_object, &dev_ctxt);
	if (dev_ctxt == NULL)
		goto func_end;

	/* enable WDT */
	if (wdt3 == 1) {
		if (dsp_wdt_get_enable())
			goto func_end;
		dsp_wdt_set_enable(true);
		if (!clk_get_use_cnt(SERVICESCLK_WDT3_FCK) &&
		    dev_ctxt->dw_brd_state != BRD_DSP_HIBERNATION)
			dsp_wdt_enable(true);
	} else if (wdt3 == 0) {
		if (!dsp_wdt_get_enable())
			goto func_end;
		if (clk_get_use_cnt(SERVICESCLK_WDT3_FCK))
			dsp_wdt_enable(false);
		dsp_wdt_set_enable(false);
	}
func_end:
	return n;
}
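
/*
 * A hedged sketch of the matching attribute definition; a wdt3_show handler
 * is assumed to exist elsewhere. The attribute enables the usual sysfs
 * control from user space:
 *
 *	echo 1 > /sys/devices/.../wdt3		(enable the DSP watchdog)
 *	echo 0 > /sys/devices/.../wdt3		(disable it)
 */
static DEVICE_ATTR(wdt3, S_IWUSR | S_IRUGO, wdt3_show, wdt3_store);
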
int send_mbox_callback(void *arg)
{
	struct wmd_dev_context *dev_context;
	struct cfg_hostres *resources;
	u32 temp;
	struct dspbridge_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	dev_get_wmd_context(dev_get_first(), &dev_context);

	if (!dev_context || !dev_context->resources)
		return -EFAULT;

	resources = dev_context->resources;
	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->dw_brd_state == BRD_HIBERNATION) {
		/* Restart the peripheral clocks */
		dsp_peripheral_clocks_enable(dev_context, NULL);

#ifdef CONFIG_BRIDGE_WDT3
		dsp_wdt_enable(true);
#endif

		/*
		 * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
		 *     in CM_AUTOIDLE_PLL_IVA2 register
		 */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		/*
		 * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frequency set to
		 *     0.75 MHz - 1.0 MHz
		 * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
		 */
		(*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK |
				OMAP3430_EN_IVA2_DPLL_MASK,
				0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT |
				0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);

		/* Restore mailbox settings */
		omap_mbox_restore_ctx(dev_context->mbox);

		/* Access MMU SYS CONFIG register to generate a short wakeup */
		temp = *(reg_uword32 *) (resources->dw_dmmu_base + 0x10);

		dev_context->dw_brd_state = BRD_RUNNING;
	} else if (dev_context->dw_brd_state == BRD_RETENTION) {
		/* Restart the peripheral clocks */
		dsp_peripheral_clocks_enable(dev_context, NULL);
		dev_context->dw_brd_state = BRD_RUNNING;
	}

	return 0;
}
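
/*
 * A minimal sketch of the read-modify-write semantics assumed for the
 * (*pdata->dsp_cm_rmw_bits)() hook used above: clear the bits in 'mask',
 * install 'bits' in their place, and write the result back. The helper name
 * and the iomem-pointer signature are illustrative, not the mach-omap2
 * prototype.
 */
static u32 example_cm_rmw_bits(u32 mask, u32 bits, void __iomem *reg)
{
	u32 v = __raw_readl(reg);

	v &= ~mask;		/* clear the field */
	v |= bits & mask;	/* install the new value */
	__raw_writel(v, reg);
	return v;
}
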
static int bridge_resume(struct platform_device *pdev)
{
	u32 status = 0;
	struct wmd_dev_context *dev_ctxt;

	dev_get_wmd_context(dev_get_first(), &dev_ctxt);
	if (!dev_ctxt)
		return -EFAULT;

	/*
	 * only wake up the DSP if it was not in Hibernation before the
	 * suspend transition
	 */
	if (dev_ctxt->dw_brd_state != BRD_DSP_HIBERNATION)
		status = pwr_wake_dsp(time_out);

	if (DSP_FAILED(status))
		return status;

	bridge_suspend_data.suspended = 0;
	wake_up(&bridge_suspend_data.suspend_wq);
	return 0;
}
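
/*
 * A hedged sketch of wiring the legacy platform PM callback above into the
 * driver; the name string, the probe/remove handlers, and a matching
 * bridge_suspend() are assumptions.
 */
static struct platform_driver bridge_driver = {
	.driver = {
		.name = "omap-dspbridge",
	},
	.probe = bridge_probe,
	.remove = bridge_remove,
#ifdef CONFIG_PM
	.suspend = bridge_suspend,
	.resume = bridge_resume,
#endif
};
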
/*
 *  ======== bridge_chnl_add_io_req ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *pHostBuf,
			       u32 byte_size, u32 buf_size,
			       OPTIONAL u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args: */
	if (pHostBuf == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
		status = -EPERM;
	} else {
		/* Check the channel state: only queue chirp if channel state
		 * allows */
		dw_state = pchnl->dw_state;
		if (dw_state != CHNL_STATEREADY) {
			if (dw_state & CHNL_STATECANCEL)
				status = -ECANCELED;
			else if ((dw_state & CHNL_STATEEOS)
				 && CHNL_IS_OUTPUT(pchnl->chnl_mode))
				status = -EPIPE;
			else
				/* No other possible states left: */
				DBC_ASSERT(0);
		}
	}

	if (DSP_FAILED(status))
		goto func_end;

	chnl_mgr_obj = pchnl->chnl_mgr_obj;

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt || !chnl_mgr_obj)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && pHostBuf) {
		if (!(pHostBuf < (void *)USERMODE_ADDR)) {
			host_sys_buf = pHostBuf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, pHostBuf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				status = -EFAULT;
				goto func_end;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size >
			    io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
				status = -EINVAL;
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Get a free chirp: */
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
		if (chnl_packet_obj == NULL)
			status = -EIO;
	}
	if (DSP_SUCCEEDED(status)) {
		/* Enqueue the chirp on the chnl's IORequest queue: */
		chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		    pHostBuf;
		if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
			chnl_packet_obj->host_sys_buf = host_sys_buf;

		/*
		 * Note: for dma chans dw_dsp_addr contains dsp address
		 * of SM buffer.
		 */
		DBC_ASSERT(chnl_mgr_obj->word_size != 0);
		/* DSP address */
		chnl_packet_obj->dsp_tx_addr =
		    dw_dsp_addr / chnl_mgr_obj->word_size;
		chnl_packet_obj->byte_size = byte_size;
		chnl_packet_obj->buf_size = buf_size;
		/* Only valid for output channel */
		chnl_packet_obj->dw_arg = dw_arg;
		chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
					   CHNL_IOCSTATCOMPLETE);
		lst_put_tail(pchnl->pio_requests,
			     (struct list_head *)chnl_packet_obj);
		pchnl->cio_reqs++;
		DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
		/* If end of stream, update the channel state to prevent
		 * more IOR's: */
		if (is_eos)
			pchnl->dw_state |= CHNL_STATEEOS;

		/* Legacy DSM Processor-Copy */
		DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
		/* Request IO from the DSP */
		io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
				(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
				 IO_OUTPUT), &mb_val);
		sched_dpc = true;
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		io_intr_dsp2(chnl_mgr_obj->hio_mgr, mb_val);

	/* Schedule a DPC, to do the actual data transfer: */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->hio_mgr);

func_end:
	return status;
}
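
/*
 * A standalone sketch of the bounce-buffer pattern used above for PCPY
 * channels with user-mode addresses: output data must be staged into a
 * kernel buffer before the I/O DPC can touch it. The helper below is
 * illustrative, not part of the driver.
 */
static void *example_stage_user_buf(void __user *ubuf, u32 size,
				    bool is_output)
{
	void *kbuf = kmalloc(size, GFP_KERNEL);

	if (!kbuf)
		return NULL;
	/* Input channels are copied back to user space on completion
	 * instead (see bridge_chnl_get_ioc). */
	if (is_output && copy_from_user(kbuf, ubuf, size)) {
		kfree(kbuf);
		return NULL;
	}
	return kbuf;
}
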
/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
			    OUT struct chnl_ioc *pIOC)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (pIOC == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;
	}

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (dwTimeOut != CHNL_IOCNOWAIT &&
	    LST_IS_EMPTY(pchnl->pio_completions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, dwTimeOut);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user-mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though the wait failed, there may be something
			 * in the queue: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set pIOC; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update pIOC from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/*  Return the user buffer in the IOC; any staged
			 *  kernel copy is handled after the lock is dropped
			 *  below. */
			host_sys_buf = chnl_packet_obj->host_sys_buf;
			ioc.pbuf = chnl_packet_obj->host_user_buf;
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  bridge_chnl_get_ioc. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      sync_set_event(pchnl->sync_event);
		 *  } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/* Copy the staged kernel buffer back to the user buffer */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			/* Ignore the failure if the process is exiting */
			if (current->flags & PF_EXITING)
				status = 0;
			else
				status = -EFAULT;
		}
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	return status;
}
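
/*
 * A hedged usage sketch: a synchronous round trip through the two routines
 * above. The channel object and buffer come from elsewhere, and the
 * CHNL_IS_IO_COMPLETE() check is assumed from chnldefs.h; error handling is
 * trimmed for brevity.
 */
static int example_chnl_roundtrip(struct chnl_object *chnl, void *buf, u32 len)
{
	struct chnl_ioc ioc;
	int status;

	/* Queue the buffer; dw_dsp_addr and dw_arg are unused here. */
	status = bridge_chnl_add_io_req(chnl, buf, len, len, 0, 0);
	if (DSP_FAILED(status))
		return status;

	/* Block until the DSP completes (or cancels) the transfer. */
	status = bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc);
	if (DSP_SUCCEEDED(status) && CHNL_IS_IO_COMPLETE(ioc))
		pr_debug("transferred %u bytes\n", ioc.byte_size);
	return status;
}
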
/*
 *  ======== bridge_deh_create ========
 *      Creates DEH manager object.
 */
int bridge_deh_create(OUT struct deh_mgr **phDehMgr,
			     struct dev_object *hdev_obj)
{
	int status = 0;
	struct deh_mgr *deh_mgr_obj = NULL;
	struct wmd_dev_context *hwmd_context = NULL;

	/*  Message manager will be created when a file is loaded, since
	 *  size of message buffer in shared memory is configurable in
	 *  the base image. */
	/* Get WMD context info. */
	dev_get_wmd_context(hdev_obj, &hwmd_context);
	DBC_ASSERT(hwmd_context);
	dummy_va_addr = 0;
	/* Allocate IO manager object: */
	deh_mgr_obj = kzalloc(sizeof(struct deh_mgr), GFP_KERNEL);
	if (deh_mgr_obj == NULL) {
		status = -ENOMEM;
	} else {
		/* Create an NTFY object to manage notifications */
		deh_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (deh_mgr_obj->ntfy_obj)
			ntfy_init(deh_mgr_obj->ntfy_obj);
		else
			status = -ENOMEM;

		deh_mgr_obj->mmu_wq = create_workqueue("dsp-mmu_wq");
		if (!deh_mgr_obj->mmu_wq)
			status = -ENOMEM;

		INIT_WORK(&deh_mgr_obj->fault_work, mmu_fault_work);

		if (DSP_SUCCEEDED(status)) {
			/* Fill in context structure */
			deh_mgr_obj->hwmd_context = hwmd_context;
			deh_mgr_obj->err_info.dw_err_mask = 0L;
			deh_mgr_obj->err_info.dw_val1 = 0L;
			deh_mgr_obj->err_info.dw_val2 = 0L;
			deh_mgr_obj->err_info.dw_val3 = 0L;
			/* Install ISR function for DSP MMU fault */
			if (request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
					"DspBridge\tiommu fault",
					(void *)deh_mgr_obj) == 0)
				status = 0;
			else
				status = -EPERM;
		}
	}
	if (DSP_FAILED(status)) {
		/* If create failed, cleanup */
		bridge_deh_destroy((struct deh_mgr *)deh_mgr_obj);
		*phDehMgr = NULL;
	} else {
		timer = omap_dm_timer_request_specific(
					GPTIMER_FOR_DSP_MMU_FAULT);
		if (timer) {
			omap_dm_timer_disable(timer);
		} else {
			pr_err("%s: GPTimer not available\n", __func__);
			/* Clean up the partially-created DEH manager */
			bridge_deh_destroy((struct deh_mgr *)deh_mgr_obj);
			*phDehMgr = NULL;
			return -ENODEV;
		}
		*phDehMgr = (struct deh_mgr *)deh_mgr_obj;
	}

	return status;
}
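
/*
 * A hedged sketch of the matching teardown order (the real
 * bridge_deh_destroy() is not shown in this listing): release resources in
 * reverse order of creation and tolerate a partially-constructed object,
 * since create calls destroy on its failure path. ntfy_delete() is assumed
 * to be the counterpart of ntfy_init().
 */
static void example_deh_teardown(struct deh_mgr *deh)
{
	if (!deh)
		return;
	/* Assumes the MMU-fault ISR was installed; a fuller version would
	 * track whether request_irq() succeeded. */
	free_irq(INT_DSP_MMU_IRQ, deh);
	if (deh->mmu_wq)
		destroy_workqueue(deh->mmu_wq);
	if (deh->ntfy_obj) {
		ntfy_delete(deh->ntfy_obj);
		kfree(deh->ntfy_obj);
	}
	kfree(deh);
}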