Example #1
void dsp_wdt_dpc(unsigned long data)
{
	struct deh_mgr *deh_mgr;
	dev_get_deh_mgr(dev_get_first(), &deh_mgr);
	if (deh_mgr)
		bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0);
}
static ssize_t wdt3_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t n)
{
	u32 wdt3;
	struct dev_object *dev_object;
	struct wmd_dev_context *dev_ctxt;

	if (sscanf(buf, "%u", &wdt3) != 1)
		return -EINVAL;

	dev_object = dev_get_first();
	if (dev_object == NULL)
		goto func_end;
	dev_get_wmd_context(dev_object, &dev_ctxt);
	if (dev_ctxt == NULL)
		goto func_end;

	/* enable WDT */
	if (wdt3 == 1) {
		if (dsp_wdt_get_enable())
			goto func_end;
		dsp_wdt_set_enable(true);
		if (!clk_get_use_cnt(SERVICESCLK_WDT3_FCK) &&
		    dev_ctxt->dw_brd_state != BRD_DSP_HIBERNATION)
			dsp_wdt_enable(true);
	} else if (wdt3 == 0) {
		if (!dsp_wdt_get_enable())
			goto func_end;
		if (clk_get_use_cnt(SERVICESCLK_WDT3_FCK))
			dsp_wdt_enable(false);
		dsp_wdt_set_enable(false);
	}
func_end:
	return n;
}
int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
{
	struct io_mgr *pio_mgr;
	struct dev_object *dev_obj;
	unsigned long flags;

	dev_obj = dev_get_first();
	dev_get_io_mgr(dev_obj, &pio_mgr);

	if (!pio_mgr)
		return NOTIFY_BAD;

	pio_mgr->intr_val = (u16)((u32)msg);
	if (pio_mgr->intr_val & MBX_PM_CLASS)
		io_dispatch_pm(pio_mgr);

	if (pio_mgr->intr_val == MBX_DEH_RESET) {
		pio_mgr->intr_val = 0;
	} else {
		spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
		pio_mgr->dpc_req++;
		spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
		tasklet_schedule(&pio_mgr->dpc_tasklet);
	}
	return NOTIFY_OK;
}
/*
 *  ======== api_init_complete2 ========
 *  Purpose:
 *      Perform any required bridge initialization that cannot
 *      be performed in api_init() or dev_start_device() because
 *      some services are not yet completely initialized.
 *  Parameters:
 *  Returns:
 *      0:	Allow this device to load
 *      -EPERM:      Failure.
 *  Requires:
 *      Bridge API initialized.
 *  Ensures:
 */
int api_init_complete2(void)
{
	int status = 0;
	struct cfg_devnode *dev_node;
	struct dev_object *hdev_obj;
	struct drv_data *drv_datap;
	u8 dev_type;

	/*  Walk the list of DevObjects, get each devnode, and attempt to
	 *  autostart the board. Note that this requires COF loading, which
	 *  requires KFILE. */
	for (hdev_obj = dev_get_first(); hdev_obj != NULL;
	     hdev_obj = dev_get_next(hdev_obj)) {
		if (dev_get_dev_node(hdev_obj, &dev_node))
			continue;

		if (dev_get_dev_type(hdev_obj, &dev_type))
			continue;

		if ((dev_type == DSP_UNIT) || (dev_type == IVA_UNIT)) {
			drv_datap = dev_get_drvdata(bridge);

			if (drv_datap && drv_datap->base_img)
				proc_auto_start(dev_node, hdev_obj);
		}
	}

	return status;
}
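A minimal sketch of a possible call site for the routine above, assuming a bring-up path in which api_init() and dev_start_device() have already run; the helper name below is purely illustrative:

/* Hypothetical late-init helper; only api_init_complete2() is real. */
static int bridge_late_init(void)
{
	int status;

	/* Run the deferred initialization once core services are up. */
	status = api_init_complete2();
	if (status)
		pr_err("bridge: deferred init failed: %d\n", status);

	return status;
}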
Example #6
int send_mbox_callback(void *arg)
{
	struct wmd_dev_context *dev_context;
	struct cfg_hostres *resources;
	u32 temp;
	struct dspbridge_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	dev_get_wmd_context(dev_get_first(), &dev_context);

	if (!dev_context || !dev_context->resources)
		return -EFAULT;

	resources = dev_context->resources;
	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->dw_brd_state == BRD_HIBERNATION) {
		/* Restart the peripheral clocks */
		dsp_peripheral_clocks_enable(dev_context, NULL);

#ifdef CONFIG_BRIDGE_WDT3
		dsp_wdt_enable(true);
#endif

		/*
		 * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
		 *     in CM_AUTOIDLE_PLL_IVA2 register
		 */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		/*
		 * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal freq set to
		 *     0.75 MHz - 1.0 MHz
		 * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
		 */
		(*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK |
				OMAP3430_EN_IVA2_DPLL_MASK,
				0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT |
				0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);

		/* Restore mailbox settings */
		omap_mbox_restore_ctx(dev_context->mbox);

		/* Access MMU SYS CONFIG register to generate a short wakeup */
		temp = *(reg_uword32 *) (resources->dw_dmmu_base + 0x10);

		dev_context->dw_brd_state = BRD_RUNNING;
	} else if (dev_context->dw_brd_state == BRD_RETENTION) {
		/* Restart the peripheral clocks */
		dsp_peripheral_clocks_enable(dev_context, NULL);
		dev_context->dw_brd_state = BRD_RUNNING;
	}

	return 0;
}
Example #7
/*
 *  ======== dmm_get_handle ========
 *  Purpose:
 *      Return the dynamic memory manager object for this device.
 *      This is typically called from the client process.
 */
int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
{
	int status = 0;
	struct dev_object *hdev_obj;

	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_dmm_mgr(hdev_obj, dmm_manager);

	return status;
}
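A hedged sketch of the client-side pattern described in the comment above: a caller holding a processor handle asks for that device's DMM manager. The caller name and surrounding logic are illustrative, not from the driver:

/* Hypothetical caller. */
static int client_get_dmm(void *proc_handle, struct dmm_object **dmm_mgr)
{
	/* A NULL handle would fall back to dev_get_first(); here the
	 * caller's processor handle selects the device explicitly. */
	int status = dmm_get_handle(proc_handle, dmm_mgr);

	if (status)
		pr_err("bridge: no DMM manager for this processor: %d\n",
		       status);

	return status;
}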
Example #8
/*
 *  ======== cmm_get_handle ========
 *  Purpose:
 *      Return the communication memory manager object for this device.
 *      This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, OUT struct cmm_object ** ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (DSP_SUCCEEDED(status))
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}
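For contrast, a sketch of the default path: passing a NULL processor handle makes cmm_get_handle() fall back to dev_get_first(). The wrapper below is hypothetical and assumes the DBC preconditions (module references held, valid output pointer) are already satisfied:

/* Hypothetical wrapper returning the default device's CMM manager. */
static struct cmm_object *get_default_cmm_mgr(void)
{
	struct cmm_object *cmm_mgr = NULL;

	if (DSP_SUCCEEDED(cmm_get_handle(NULL, &cmm_mgr)))
		return cmm_mgr;

	return NULL;
}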
static void bridge_recover(struct work_struct *work)
{
	struct dev_object *dev;
	struct cfg_devnode *dev_node;
	if (atomic_read(&bridge_cref)) {
		INIT_COMPLETION(bridge_comp);
		while (!wait_for_completion_timeout(&bridge_comp,
						msecs_to_jiffies(REC_TIMEOUT)))
			pr_info("%s:%d handle(s) still opened\n",
					__func__, atomic_read(&bridge_cref));
	}
	dev = dev_get_first();
	dev_get_dev_node(dev, &dev_node);
	if (!dev_node || proc_auto_start(dev_node, dev))
		pr_err("DSP could not be restarted\n");
	recover = false;
	complete_all(&bridge_open_comp);
}
static int BRIDGE_RESUME(struct platform_device *pdev)
{
	u32 status = 0;
	struct wmd_dev_context *dev_ctxt;

	dev_get_wmd_context(dev_get_first(), &dev_ctxt);
	if (!dev_ctxt)
		return -EFAULT;

	/*
	 * only wake up the DSP if it was not in Hibernation before the
	 * suspend transition
	 */
	if (dev_ctxt->dw_brd_state != BRD_DSP_HIBERNATION)
		status = pwr_wake_dsp(time_out);

	if (DSP_FAILED(status))
		return status;

	bridge_suspend_data.suspended = 0;
	wake_up(&bridge_suspend_data.suspend_wq);
	return 0;
}
Example #11
/*
 *  ======== bridge_chnl_add_io_req ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *pHostBuf,
			       u32 byte_size, u32 buf_size,
			       OPTIONAL u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args:  */
	if (pHostBuf == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
		status = -EPERM;
	} else {
		/* Check the channel state: only queue chirp if channel state
		 * allows */
		dw_state = pchnl->dw_state;
		if (dw_state != CHNL_STATEREADY) {
			if (dw_state & CHNL_STATECANCEL)
				status = -ECANCELED;
			else if ((dw_state & CHNL_STATEEOS)
				 && CHNL_IS_OUTPUT(pchnl->chnl_mode))
				status = -EPIPE;
			else
				/* No other possible states left: */
				DBC_ASSERT(0);
		}
	}

	if (DSP_FAILED(status))
		goto func_end;

	chnl_mgr_obj = pchnl->chnl_mgr_obj;

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt || !chnl_mgr_obj)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && pHostBuf) {
		if (!(pHostBuf < (void *)USERMODE_ADDR)) {
			host_sys_buf = pHostBuf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, pHostBuf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				status = -EFAULT;
				goto func_end;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size >
			    io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
				status = -EINVAL;

		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Get a free chirp: */
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
		if (chnl_packet_obj == NULL)
			status = -EIO;

	}
	if (DSP_SUCCEEDED(status)) {
		/* Enqueue the chirp on the chnl's IORequest queue: */
		chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		    pHostBuf;
		if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
			chnl_packet_obj->host_sys_buf = host_sys_buf;

		/*
		 * Note: for dma chans dw_dsp_addr contains dsp address
		 * of SM buffer.
		 */
		DBC_ASSERT(chnl_mgr_obj->word_size != 0);
		/* DSP address */
		chnl_packet_obj->dsp_tx_addr =
		    dw_dsp_addr / chnl_mgr_obj->word_size;
		chnl_packet_obj->byte_size = byte_size;
		chnl_packet_obj->buf_size = buf_size;
		/* Only valid for output channel */
		chnl_packet_obj->dw_arg = dw_arg;
		chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
					   CHNL_IOCSTATCOMPLETE);
		lst_put_tail(pchnl->pio_requests,
			     (struct list_head *)chnl_packet_obj);
		pchnl->cio_reqs++;
		DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
		/* If end of stream, update the channel state to prevent
		 * more IOR's: */
		if (is_eos)
			pchnl->dw_state |= CHNL_STATEEOS;

		/* Legacy DSM Processor-Copy */
		DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
		/* Request IO from the DSP */
		io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
				(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
				 IO_OUTPUT), &mb_val);
		sched_dpc = true;

	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		io_intr_dsp2(chnl_mgr_obj->hio_mgr, mb_val);

	/* Schedule a DPC, to do the actual data transfer: */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->hio_mgr);

func_end:
	return status;
}
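A hedged sketch of how the enqueue side might be used: one output buffer is queued on an already opened processor-copy channel. The wrapper and its argument choices are illustrative only:

/* Hypothetical caller, queuing one output buffer on an open channel. */
static int queue_output_buf(struct chnl_object *chnl, u8 *buf, u32 len)
{
	/* byte_size == buf_size for a full buffer; dw_dsp_addr and dw_arg
	 * of 0 are placeholders for a plain processor-copy channel. */
	return bridge_chnl_add_io_req(chnl, buf, len, len, 0, 0);
}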
Example #12
/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
			    OUT struct chnl_ioc *pIOC)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (pIOC == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;

	}

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (dwTimeOut !=
	    CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, dwTimeOut);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set pIOC; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update pIOC from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/*  If this is a zero-copy channel, then set IOC's pbuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later. */
			{
				host_sys_buf = chnl_packet_obj->host_sys_buf;
				ioc.pbuf = chnl_packet_obj->host_user_buf;
			}
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  bridge_chnl_get_ioc. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      sync_set_event(pchnl->sync_event);
		 *  } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/*host_user_buf */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	return status;
}
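And the matching reclaim side, sketched under the same assumptions: wait for a completion record with an arbitrary example timeout and check its status bits. The wrapper name and the error mapping are illustrative:

/* Hypothetical caller, reclaiming one completed buffer. */
static int reclaim_buf(struct chnl_object *chnl, struct chnl_ioc *ioc)
{
	/* 5000 ms is an arbitrary example; CHNL_IOCINFINITE or
	 * CHNL_IOCNOWAIT could be passed instead. */
	int status = bridge_chnl_get_ioc(chnl, 5000, ioc);

	if (!status && (ioc->status & CHNL_IOCSTATTIMEOUT))
		status = -ETIMEDOUT;

	return status;
}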