Example #1
0
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct omap_mbox_queue *mq = mbox->rxq;
	mbox_msg_t msg;
	int len;

	while (!mbox_fifo_empty(mbox)) {
		if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
			omap_mbox_disable_irq(mbox, IRQ_RX);
			mq->full = true;
			goto nomem;
		}

		msg = mbox_fifo_read(mbox);

		len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));

		if (mbox->ops->type == OMAP_MBOX_TYPE1)
			break;
	}

	/* no more messages in the fifo. clear IRQ source. */
	ack_mbox_irq(mbox, IRQ_RX);
nomem:
	schedule_work(&mbox->rxq->work);
}
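The handler above only drains the hardware FIFO into the software kfifo and defers the real work to mq->work. For context, a minimal sketch of such a deferred RX worker is shown below; it assumes that omap_mbox_queue carries a back-pointer (mq->mbox) and a lock (mq->lock) and that received messages are fanned out through a notifier chain on the mailbox, none of which is shown in the examples.
/*
 * Sketch of a deferred RX worker (assumptions as noted above): drain
 * the software kfifo in process context, hand each message to the
 * mailbox's notifier chain, and re-enable IRQ_RX once the backlog
 * that forced mq->full has been cleared.
 */
static void mbox_rx_work(struct work_struct *work)
{
	struct omap_mbox_queue *mq =
			container_of(work, struct omap_mbox_queue, work);
	mbox_msg_t msg;
	int len;

	while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
		len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));

		blocking_notifier_call_chain(&mq->mbox->notifier, len,
					     (void *)msg);

		spin_lock_irq(&mq->lock);
		if (mq->full) {
			mq->full = false;
			omap_mbox_enable_irq(mq->mbox, IRQ_RX);
		}
		spin_unlock_irq(&mq->lock);
	}
}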
Example #2
0
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct omap_mbox_queue *mq = mbox->rxq;
	mbox_msg_t msg;
	int len;

	while (!mbox_fifo_empty(mbox)) {
		if (unlikely(__kfifo_len(mq->fifo) >=
				FIFO_SIZE - sizeof(msg))) {
			omap_mbox_disable_irq(mbox, IRQ_RX);
			rq_full = true;
			goto nomem;
		}

		msg = mbox_fifo_read(mbox);

		len = __kfifo_put(mq->fifo, (unsigned char *)&msg, sizeof(msg));
		if (unlikely(len != sizeof(msg)))
			pr_err("%s: __kfifo_put anomaly detected\n", __func__);
		if (mbox->ops->type == OMAP_MBOX_TYPE1)
			break;
	}

	/* no more messages in the fifo. clear IRQ source. */
	ack_mbox_irq(mbox, IRQ_RX);
nomem:
	queue_work(mboxd, &mbox->rxq->work);
}
Example #3
0
static int am33xx_pm_begin(suspend_state_t state)
{
	int ret = 0;
	int state_id = 0;

	disable_hlt();

	switch (state) {
	case PM_SUSPEND_STANDBY:
		state_id = 0xb;
		break;
	case PM_SUSPEND_MEM:
		state_id = 0x3;
		break;
	}

	/*
	 * Populate the resume address as part of IPC data
	 * The offset to be added comes from sleep33xx.S
	 * Add 4 bytes to ensure that resume happens from
	 * the word *after* the word which holds the resume offset
	 */
	am33xx_lp_ipc.resume_addr = (DS_RESUME_BASE + am33xx_resume_offset + 4);
	am33xx_lp_ipc.sleep_mode  = state_id;
	am33xx_lp_ipc.ipc_data1	  = DS_IPC_DEFAULT;
	am33xx_lp_ipc.ipc_data2   = DS_IPC_DEFAULT;

	am33xx_ipc_cmd(&am33xx_lp_ipc);

	m3_state = M3_STATE_MSG_FOR_LP;

	omap_mbox_enable_irq(m3_mbox, IRQ_RX);

	ret = omap_mbox_msg_send(m3_mbox, 0xABCDABCD);
	if (ret) {
		pr_err("A8<->CM3 MSG for LP failed\n");
		am33xx_m3_state_machine_reset();
		ret = -1;
	}

	if (!wait_for_completion_timeout(&a8_m3_sync, msecs_to_jiffies(5000))) {
		pr_err("A8<->CM3 sync failure\n");
		am33xx_m3_state_machine_reset();
		ret = -1;
	} else {
		pr_debug("Message sent for entering %s\n",
			(DS_MODE == DS0_ID ? "DS0" : "DS1"));
		omap_mbox_disable_irq(m3_mbox, IRQ_RX);
	}

	suspend_state = state;
	return ret;
}
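The wait on a8_m3_sync above is presumably satisfied from the mailbox RX path. The sketch below is a hypothetical notifier callback illustrating that hand-off; the callback name, the m3_state check, and how the block gets registered on m3_mbox are assumptions, not taken from the example.
/*
 * Hypothetical RX notifier for the A8<->CM3 mailbox: when the wakeup
 * M3 answers the low-power message, wake up am33xx_pm_begin(), which
 * is blocked in wait_for_completion_timeout(&a8_m3_sync, ...).
 */
static int wkup_m3_mbox_callback(struct notifier_block *nb,
				 unsigned long msg, void *data)
{
	if (m3_state == M3_STATE_MSG_FOR_LP)
		complete(&a8_m3_sync);

	return NOTIFY_OK;
}

static struct notifier_block wkup_m3_mbox_nb = {
	.notifier_call = wkup_m3_mbox_callback,
};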
Example #4
0
static void omap_mbox_fini(struct omap_mbox *mbox)
{
	mutex_lock(&mbox_configured_lock);

	if (!--mbox->use_count) {
		omap_mbox_disable_irq(mbox, IRQ_RX);
		free_irq(mbox->irq, mbox);
		tasklet_kill(&mbox->txq->tasklet);
		flush_work(&mbox->rxq->work);
		mbox_queue_free(mbox->txq);
		mbox_queue_free(mbox->rxq);
	}

	if (likely(mbox->ops->shutdown)) {
		if (!--mbox_configured)
			mbox->ops->shutdown(mbox);
	}

	mutex_unlock(&mbox_configured_lock);
}
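omap_mbox_fini() runs when the last user releases the mailbox. A minimal client-side sketch of the acquire/release pairing is shown below, assuming the single-argument omap_mbox_get() of this kernel vintage; the mailbox name and the function names are hypothetical.
/* Minimal client sketch; "mailbox-1" and the helper names are hypothetical. */
static struct omap_mbox *example_mbox;

static int example_open(void)
{
	/* Assumed to bump use_count and set up the queues and IRQ. */
	example_mbox = omap_mbox_get("mailbox-1");
	if (IS_ERR(example_mbox))
		return PTR_ERR(example_mbox);

	omap_mbox_enable_irq(example_mbox, IRQ_RX);
	return 0;
}

static void example_close(void)
{
	omap_mbox_disable_irq(example_mbox, IRQ_RX);
	omap_mbox_put(example_mbox);	/* last put ends up in omap_mbox_fini() */
	example_mbox = NULL;
}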
Example #5
0
static int am33xx_pm_begin(suspend_state_t state)
{
	int ret = 0;

	disable_hlt();

	am33xx_lp_ipc.resume_addr = DS_RESUME_ADDR;
	am33xx_lp_ipc.sleep_mode  = DS_MODE;
	am33xx_lp_ipc.ipc_data1	  = DS_IPC_DEFAULT;
	am33xx_lp_ipc.ipc_data2   = DS_IPC_DEFAULT;

	am33xx_ipc_cmd(&am33xx_lp_ipc);

	m3_state = M3_STATE_MSG_FOR_LP;

	omap_mbox_enable_irq(m3_mbox, IRQ_RX);

	ret = omap_mbox_msg_send(m3_mbox, 0xABCDABCD);
	if (ret) {
		pr_err("A8<->CM3 MSG for LP failed\n");
		am33xx_m3_state_machine_reset();
		ret = -1;
	}

	if (!wait_for_completion_timeout(&a8_m3_sync, msecs_to_jiffies(5000))) {
		pr_err("A8<->CM3 sync failure\n");
		am33xx_m3_state_machine_reset();
		ret = -1;
	} else {
		pr_debug("Message sent for entering %s\n",
			(DS_MODE == DS0_ID ? "DS0" : "DS1"));
		omap_mbox_disable_irq(m3_mbox, IRQ_RX);
	}

	suspend_state = state;
	return ret;
}
Example #6
0
/*
 *  ======== bridge_chnl_add_io_req ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *pHostBuf,
			       u32 byte_size, u32 buf_size,
			       OPTIONAL u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args:  */
	if (pHostBuf == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
		status = -EPERM;
	} else {
		/* Check the channel state: only queue chirp if channel state
		 * allows */
		dw_state = pchnl->dw_state;
		if (dw_state != CHNL_STATEREADY) {
			if (dw_state & CHNL_STATECANCEL)
				status = -ECANCELED;
			else if ((dw_state & CHNL_STATEEOS)
				 && CHNL_IS_OUTPUT(pchnl->chnl_mode))
				status = -EPIPE;
			else
				/* No other possible states left: */
				DBC_ASSERT(0);
		}
	}

	if (DSP_FAILED(status))
		goto func_end;

	chnl_mgr_obj = pchnl->chnl_mgr_obj;

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt || !chnl_mgr_obj)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && pHostBuf) {
		if (!(pHostBuf < (void *)USERMODE_ADDR)) {
			host_sys_buf = pHostBuf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, pHostBuf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				status = -EFAULT;
				goto func_end;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size >
			    io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
				status = -EINVAL;

		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Get a free chirp: */
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
		if (chnl_packet_obj == NULL)
			status = -EIO;

	}
	if (DSP_SUCCEEDED(status)) {
		/* Enqueue the chirp on the chnl's IORequest queue: */
		chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		    pHostBuf;
		if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
			chnl_packet_obj->host_sys_buf = host_sys_buf;

		/*
		 * Note: for dma chans dw_dsp_addr contains dsp address
		 * of SM buffer.
		 */
		DBC_ASSERT(chnl_mgr_obj->word_size != 0);
		/* DSP address */
		chnl_packet_obj->dsp_tx_addr =
		    dw_dsp_addr / chnl_mgr_obj->word_size;
		chnl_packet_obj->byte_size = byte_size;
		chnl_packet_obj->buf_size = buf_size;
		/* Only valid for output channel */
		chnl_packet_obj->dw_arg = dw_arg;
		chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
					   CHNL_IOCSTATCOMPLETE);
		lst_put_tail(pchnl->pio_requests,
			     (struct list_head *)chnl_packet_obj);
		pchnl->cio_reqs++;
		DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
		/* If end of stream, update the channel state to prevent
		 * more IOR's: */
		if (is_eos)
			pchnl->dw_state |= CHNL_STATEEOS;

		/* Legacy DSM Processor-Copy */
		DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
		/* Request IO from the DSP */
		io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
				(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
				 IO_OUTPUT), &mb_val);
		sched_dpc = true;

	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		io_intr_dsp2(chnl_mgr_obj->hio_mgr, mb_val);

	/* Schedule a DPC, to do the actual data transfer: */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->hio_mgr);

func_end:
	return status;
}
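The comment at func_cont explains why the mailbox RX interrupt is masked inside the channel-manager critical section. The condensed sketch below restates that pattern on its own, using the names that appear in this example; the wrapper function itself is an illustration, not additional driver code.
/*
 * Locking pattern shared by bridge_chnl_add_io_req() and
 * bridge_chnl_get_ioc(): mask IRQ_RX while holding the channel
 * manager lock, touch the chirp lists, then unmask/unlock and only
 * afterwards signal the DSP.
 */
static void bridge_chnl_critical_section(struct chnl_mgr *chnl_mgr_obj,
					 struct wmd_dev_context *dev_ctxt)
{
	u16 mb_val = 0;

	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);

	/* ... enqueue/dequeue chirps, update channel state, set mb_val ... */

	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	if (mb_val != 0)
		io_intr_dsp2(chnl_mgr_obj->hio_mgr, mb_val);
}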
Example #7
0
/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
			    OUT struct chnl_ioc *pIOC)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (pIOC == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;

	}

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (dwTimeOut !=
	    CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, dwTimeOut);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set pIOC; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update pIOC from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/*  If this is a zero-copy channel, then set IOC's pbuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later. */
			{
				host_sys_buf = chnl_packet_obj->host_sys_buf;
				ioc.pbuf = chnl_packet_obj->host_user_buf;
			}
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  bridge_chnl_get_ioc. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      sync_set_event(pchnl->sync_event);
		 *  } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/*host_user_buf */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	return status;
}
Example #8
0
/*
 * Mailbox interrupt handler
 */
static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
	omap_mbox_disable_irq(mbox, IRQ_TX);
	ack_mbox_irq(mbox, IRQ_TX);
	tasklet_schedule(&mbox->txq->tasklet);
}
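__mbox_tx_interrupt() only masks and acknowledges IRQ_TX and defers to the TX tasklet. A sketch of what such a tasklet plausibly does is shown below: move messages from the software kfifo into the hardware FIFO and re-arm IRQ_TX when the hardware FIFO fills up. The kfifo-based txq layout mirrors Example #1; the helpers mbox_fifo_full() and mbox_fifo_write() are assumptions, not shown in the examples.
/* Sketch of the TX tasklet paired with the interrupt handler above. */
static void mbox_tx_tasklet(unsigned long tx_data)
{
	struct omap_mbox *mbox = (struct omap_mbox *)tx_data;
	struct omap_mbox_queue *mq = mbox->txq;
	mbox_msg_t msg;
	int ret;

	while (kfifo_len(&mq->fifo)) {
		if (mbox_fifo_full(mbox)) {
			/* Hardware FIFO full: wait for the next TX interrupt. */
			omap_mbox_enable_irq(mbox, IRQ_TX);
			break;
		}

		ret = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(ret != sizeof(msg));

		mbox_fifo_write(mbox, msg);
	}
}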
Example #9
0
/*
 *  ======== WMD_CHNL_AddIOReq ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
DSP_STATUS WMD_CHNL_AddIOReq(struct CHNL_OBJECT *hChnl, void *pHostBuf,
			    u32 cBytes, u32 cBufSize,
			    OPTIONAL u32 dwDspAddr, u32 dwArg)
{
	DSP_STATUS status = DSP_SOK;
	struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl;
	struct CHNL_IRP *pChirp = NULL;
	struct WMD_DEV_CONTEXT *dev_ctxt;
	struct DEV_OBJECT *dev_obj;
	u32 dwState;
	bool fIsEOS;
	struct CHNL_MGR *pChnlMgr = pChnl->pChnlMgr;
	u8 *pHostSysBuf = NULL;
	bool fSchedDPC = false;
	u16 wMbVal = 0;

	fIsEOS = (cBytes == 0);
	/* Validate args:  */
	if (pHostBuf == NULL) {
		status = DSP_EPOINTER;
	} else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) {
		status = DSP_EHANDLE;
	} else if (fIsEOS && CHNL_IsInput(pChnl->uMode)) {
		status = CHNL_E_NOEOS;
	} else {
		/* Check the channel state: only queue chirp if channel state
		 * allows */
		dwState = pChnl->dwState;
		if (dwState != CHNL_STATEREADY) {
			if (dwState & CHNL_STATECANCEL)
				status = CHNL_E_CANCELLED;
			else if ((dwState & CHNL_STATEEOS)
				   && CHNL_IsOutput(pChnl->uMode))
				status = CHNL_E_EOS;
			else
				/* No other possible states left: */
				DBC_Assert(0);
		}
	}

	dev_obj = DEV_GetFirst();
	DEV_GetWMDContext(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = DSP_EHANDLE;

	if (DSP_FAILED(status))
		goto func_end;

	if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1 && pHostBuf) {
		if (!(pHostBuf < (void *)USERMODE_ADDR)) {
			pHostSysBuf = pHostBuf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		pHostSysBuf = MEM_Alloc(cBufSize, MEM_NONPAGED);
		if (pHostSysBuf == NULL) {
			status = DSP_EMEMORY;
			goto func_end;
		}
		if (CHNL_IsOutput(pChnl->uMode)) {
			status = copy_from_user(pHostSysBuf, pHostBuf,
						cBufSize);
			if (status) {
				kfree(pHostSysBuf);
				pHostSysBuf = NULL;
				status = DSP_EPOINTER;
				goto func_end;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (IO_Schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later.  */
	SYNC_EnterCS(pChnlMgr->hCSObj);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pChnl->uChnlType == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (DSP_SUCCEEDED(status) && CHNL_IsOutput(pChnl->uMode)) {
			/* Check buffer size on output channels for fit. */
			if (cBytes > IO_BufSize(pChnl->pChnlMgr->hIOMgr))
				status = CHNL_E_BUFSIZE;

		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Get a free chirp: */
		pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pFreeList);
		if (pChirp == NULL)
			status = CHNL_E_NOIORPS;

	}
	if (DSP_SUCCEEDED(status)) {
		/* Enqueue the chirp on the chnl's IORequest queue: */
		pChirp->pHostUserBuf = pChirp->pHostSysBuf = pHostBuf;
		if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1)
			pChirp->pHostSysBuf = pHostSysBuf;

		/*
		 * Note: for dma chans dwDspAddr contains dsp address
		 * of SM buffer.
		 */
		DBC_Assert(pChnlMgr->uWordSize != 0);
		/* DSP address */
		pChirp->uDspAddr = dwDspAddr / pChnlMgr->uWordSize;
		pChirp->cBytes = cBytes;
		pChirp->cBufSize = cBufSize;
		/* Only valid for output channel */
		pChirp->dwArg = dwArg;
		pChirp->status = (fIsEOS ? CHNL_IOCSTATEOS :
						CHNL_IOCSTATCOMPLETE);
		LST_PutTail(pChnl->pIORequests, (struct list_head *)pChirp);
		pChnl->cIOReqs++;
		DBC_Assert(pChnl->cIOReqs <= pChnl->cChirps);
		/* If end of stream, update the channel state to prevent
		 * more IOR's: */
		if (fIsEOS)
			pChnl->dwState |= CHNL_STATEEOS;

		/* Legacy DSM Processor-Copy */
		DBC_Assert(pChnl->uChnlType == CHNL_PCPY);
		/* Request IO from the DSP */
		IO_RequestChnl(pChnlMgr->hIOMgr, pChnl,
			(CHNL_IsInput(pChnl->uMode) ? IO_INPUT : IO_OUTPUT),
								&wMbVal);
		fSchedDPC = true;


	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	SYNC_LeaveCS(pChnlMgr->hCSObj);
	if (wMbVal != 0)
		IO_IntrDSP2(pChnlMgr->hIOMgr, wMbVal);

	/* Schedule a DPC, to do the actual data transfer: */
	if (fSchedDPC)
		IO_Schedule(pChnlMgr->hIOMgr);

func_end:
	return status;
}
Example #10
0
/*
 *  ======== WMD_CHNL_GetIOC ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
DSP_STATUS WMD_CHNL_GetIOC(struct CHNL_OBJECT *hChnl, u32 dwTimeOut,
			  OUT struct CHNL_IOC *pIOC)
{
	DSP_STATUS status = DSP_SOK;
	struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl;
	struct CHNL_IRP *pChirp;
	DSP_STATUS statSync;
	bool fDequeueIOC = true;
	struct CHNL_IOC ioc = { NULL, 0, 0, 0, 0 };
	u8 *pHostSysBuf = NULL;
	struct WMD_DEV_CONTEXT *dev_ctxt;
	struct DEV_OBJECT *dev_obj;

	/* Check args: */
	if (pIOC == NULL) {
		status = DSP_EPOINTER;
	} else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) {
		status = DSP_EHANDLE;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		if (LST_IsEmpty(pChnl->pIOCompletions))
			status = CHNL_E_NOIOC;

	}

	dev_obj = DEV_GetFirst();
	DEV_GetWMDContext(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = DSP_EHANDLE;

	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (dwTimeOut != CHNL_IOCNOWAIT && LST_IsEmpty(pChnl->pIOCompletions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		statSync = SYNC_WaitOnEvent(pChnl->hSyncEvent, dwTimeOut);
		if (statSync == DSP_ETIMEOUT) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			fDequeueIOC = false;
		} else if (statSync == DSP_EFAIL) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes.  */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IsEmpty(pChnl->pIOCompletions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				fDequeueIOC = false;
			}
		}
	}
	/* See comment in AddIOReq */
	SYNC_EnterCS(pChnl->pChnlMgr->hCSObj);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (fDequeueIOC) {
		/* Dequeue IOC and set pIOC; */
		DBC_Assert(!LST_IsEmpty(pChnl->pIOCompletions));
		pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIOCompletions);
		/* Update pIOC from channel state and chirp: */
		if (pChirp) {
			pChnl->cIOCs--;
			/*  If this is a zero-copy channel, then set IOC's pBuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later.  */
			{
				pHostSysBuf = pChirp->pHostSysBuf;
				ioc.pBuf = pChirp->pHostUserBuf;
			}
			ioc.cBytes = pChirp->cBytes;
			ioc.cBufSize = pChirp->cBufSize;
			ioc.dwArg = pChirp->dwArg;
			ioc.status |= pChirp->status;
			/* Place the used chirp on the free list: */
			LST_PutTail(pChnl->pFreeList,
					(struct list_head *)pChirp);
		} else {
			ioc.pBuf = NULL;
			ioc.cBytes = 0;
		}
	} else {
		ioc.pBuf = NULL;
		ioc.cBytes = 0;
		ioc.dwArg = 0;
		ioc.cBufSize = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IsEmpty(pChnl->pIOCompletions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  WMD_CHNL_GetIOC. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      SYNC_SetEvent(pChnl->hSyncEvent);
		 *  } */
		SYNC_SetEvent(pChnl->hSyncEvent);
	} else {
		/* else, if list is empty, ensure event is reset. */
		SYNC_ResetEvent(pChnl->hSyncEvent);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	SYNC_LeaveCS(pChnl->pChnlMgr->hCSObj);
	if (fDequeueIOC && (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1)) {
		if (!(ioc.pBuf < (void *) USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!pHostSysBuf || !ioc.pBuf) {
			status = DSP_EPOINTER;
			goto func_cont;
		}
		if (!CHNL_IsInput(pChnl->uMode))
			goto func_cont1;

		/*pHostUserBuf */
		status = copy_to_user(ioc.pBuf, pHostSysBuf, ioc.cBytes);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = DSP_EPOINTER;
func_cont1:
		kfree(pHostSysBuf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	return status;
}
Example #11
0
/*
 *  ======== bridge_brd_stop ========
 *  purpose:
 *      Puts DSP in self loop.
 *
 *  Preconditions :
 *  a) None
 */
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 dsp_pwr_state;
	int i;
	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	if (dev_context->dw_brd_state == BRD_STOPPED)
		return status;

	/* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
	 * before turning off the clocks. This is to ensure that there are no
	 * pending L3 or other transactions from IVA2 */
	dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	if (dsp_pwr_state != PWRDM_POWER_OFF) {
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
		mdelay(10);

		/* IVA2 is not in OFF state */
		/* Set PM_PWSTCTRL_IVA2  to OFF */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition for Sleep */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	udelay(10);
	/* Release the Ext Base virtual Address as the next DSP Program
	 * may have a different load address */
	if (dev_context->dw_dsp_ext_base_addr)
		dev_context->dw_dsp_ext_base_addr = 0;

	dev_context->dw_brd_state = BRD_STOPPED;	/* update board state */

	dsp_wdt_enable(false);

	/* Reset DSP */
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
		OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Disable the mailbox interrupts */
	if (dev_context->mbox) {
		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
		omap_mbox_put(dev_context->mbox);
		dev_context->mbox = NULL;
	}
	if (dev_context->dsp_mmu) {
		pr_err("Proc stop mmu if statement\n");
		for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
			if (!tlb[i].ul_gpp_pa)
				continue;
			iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
		}
		i = 0;
		while (l4_peripheral_table[i].phys_addr) {
			iommu_kunmap(dev_context->dsp_mmu,
				l4_peripheral_table[i].dsp_virt_addr);
			i++;
		}
		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
		dsp_mmu_exit(dev_context->dsp_mmu);
		dev_context->dsp_mmu = NULL;
	}
	/* Reset IVA IOMMU*/
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
		OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	dsp_clock_disable_all(dev_context->dsp_per_clks);
	dsp_clk_disable(DSP_CLK_IVA2);

	return status;
}