Example #1
/*
 *  ======== pwr_wake_dsp ========
 *    Send command to DSP to wake it from sleep.
 */
int pwr_wake_dsp(const u32 timeout)
{
	struct bridge_drv_interface *intf_fxns;
	struct bridge_dev_context *dw_context;
	int status = -EPERM;
	struct dev_object *hdev_obj = NULL;
	u32 arg = timeout;

	for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
	     hdev_obj != NULL;
	     hdev_obj = (struct dev_object *)drv_get_next_dev_object
	     ((u32) hdev_obj)) {
		if (!(dev_get_bridge_context(hdev_obj,
						      (struct bridge_dev_context
						       **)&dw_context))) {
			if (!(dev_get_intf_fxns(hdev_obj,
			      (struct bridge_drv_interface **)&intf_fxns))) {
				status =
				    (*intf_fxns->pfn_dev_cntrl) (dw_context,
							BRDIOCTL_WAKEUP,
							(void *)&arg);
			}
		}
	}
	return status;
}
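Examples #1, #2, and #5 below all repeat the same skeleton: walk every bridge device and issue one BRDIOCTL command through pfn_dev_cntrl. A minimal sketch of that pattern factored into a shared helper (pwr_send_ioctl is a hypothetical name, not part of the driver):

/* Hypothetical helper: issue one BRDIOCTL command to every bridge device.
 * Sketch only -- pwr_send_ioctl() is not part of the driver. */
static int pwr_send_ioctl(u32 ioctlcode, void *arg)
{
	struct bridge_drv_interface *intf_fxns;
	struct bridge_dev_context *dw_context;
	struct dev_object *hdev_obj;
	int status = -EPERM;

	for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
	     hdev_obj != NULL;
	     hdev_obj = (struct dev_object *)
			drv_get_next_dev_object((u32) hdev_obj)) {
		if (dev_get_bridge_context(hdev_obj, &dw_context))
			continue;
		if (dev_get_intf_fxns(hdev_obj, &intf_fxns))
			continue;
		status = (*intf_fxns->pfn_dev_cntrl)(dw_context, ioctlcode,
						     arg);
	}
	return status;
}

With this helper, pwr_wake_dsp() reduces to "u32 arg = timeout; return pwr_send_ioctl(BRDIOCTL_WAKEUP, &arg);".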
Example #2
/*
 *  ======== pwr_pm_post_scale ========
 *    Sends post-notification message to DSP.
 */
int pwr_pm_post_scale(u16 voltage_domain, u32 level)
{
	struct bridge_drv_interface *intf_fxns;
	struct bridge_dev_context *dw_context;
	int status = -EPERM;
	struct dev_object *hdev_obj = NULL;
	u32 arg[2];

	arg[0] = voltage_domain;
	arg[1] = level;

	for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
	     hdev_obj != NULL;
	     hdev_obj = (struct dev_object *)drv_get_next_dev_object
	     ((u32) hdev_obj)) {
		if (!(dev_get_bridge_context(hdev_obj,
						      (struct bridge_dev_context
						       **)&dw_context))) {
			if (!(dev_get_intf_fxns(hdev_obj,
			      (struct bridge_drv_interface **)&intf_fxns))) {
				status =
				    (*intf_fxns->pfn_dev_cntrl) (dw_context,
						BRDIOCTL_POSTSCALE_NOTIFY,
						(void *)&arg);
			}
		}
	}
	return status;
}
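The matching pre-scale notification is symmetric to the function above. A sketch reusing the hypothetical pwr_send_ioctl() helper from Example #1, assuming a BRDIOCTL_PRESCALE_NOTIFY command code exists alongside the POSTSCALE one:

/* Sketch: pre-notification counterpart of pwr_pm_post_scale().
 * Assumes a BRDIOCTL_PRESCALE_NOTIFY command code. */
int pwr_pm_pre_scale(u16 voltage_domain, u32 level)
{
	u32 arg[2] = { voltage_domain, level };

	return pwr_send_ioctl(BRDIOCTL_PRESCALE_NOTIFY, arg);
}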
Example #3
int bridge_deh_create(struct deh_mgr **ret_deh,
		struct dev_object *hdev_obj)
{
	int status;
	struct deh_mgr *deh;
	struct bridge_dev_context *hbridge_context = NULL;

	/*  Message manager will be created when a file is loaded, since
	 *  size of message buffer in shared memory is configurable in
	 *  the base image. */
	/* Get Bridge context info. */
	dev_get_bridge_context(hdev_obj, &hbridge_context);
	/* Allocate DEH manager object: */
	deh = kzalloc(sizeof(*deh), GFP_KERNEL);
	if (!deh) {
		status = -ENOMEM;
		goto err;
	}

	/* Create an NTFY object to manage notifications */
	deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!deh->ntfy_obj) {
		status = -ENOMEM;
		goto err;
	}
	ntfy_init(deh->ntfy_obj);

	/* Create an MMU-fault DPC */
	tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);

	/* Fill in context structure */
	deh->bridge_context = hbridge_context;

	/* Install ISR function for DSP MMU fault */
	status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
			"DspBridge\tiommu fault", deh);
	if (status < 0)
		goto err;

	*ret_deh = deh;
	return 0;

err:
	bridge_deh_destroy(deh);
	*ret_deh = NULL;
	return status;
}
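Because the error path above calls bridge_deh_destroy() on a partially constructed object, the teardown must tolerate missing pieces. A minimal sketch of the reverse-order cleanup (the driver's actual version may differ, e.g. in how it guards free_irq() when request_irq() never succeeded):

/* Sketch of the matching teardown; every step must tolerate a deh_mgr
 * that bridge_deh_create() only partially initialized. */
void bridge_deh_destroy_sketch(struct deh_mgr *deh)
{
	if (!deh)
		return;

	/* Release the MMU-fault IRQ, then stop the DPC it schedules */
	free_irq(INT_DSP_MMU_IRQ, deh);
	tasklet_kill(&deh->dpc_tasklet);

	if (deh->ntfy_obj) {
		ntfy_delete(deh->ntfy_obj);
		kfree(deh->ntfy_obj);
	}
	kfree(deh);
}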
Example #4
static void mcbsp_clk_prepare(bool flag, u8 id)
{
	struct cfg_hostres *resources;
	struct dev_object *hdev_object = NULL;
	struct bridge_dev_context *bridge_context = NULL;
	u32 val;

	hdev_object = (struct dev_object *)drv_get_first_dev_object();
	if (!hdev_object)
		return;

	dev_get_bridge_context(hdev_object, &bridge_context);
	if (!bridge_context)
		return;

	resources = bridge_context->resources;
	if (!resources)
		return;

	if (flag) {
		if (id == DSP_CLK_MCBSP1) {
			/* set MCBSP1_CLKS, on McBSP1 ON */
			val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
			val |= 1 << 2;
			__raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
		} else if (id == DSP_CLK_MCBSP2) {
			/* set MCBSP2_CLKS, on McBSP2 ON */
			val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
			val |= 1 << 6;
			__raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
		}
	} else {
		if (id == DSP_CLK_MCBSP1) {
			/* clear MCBSP1_CLKS, on McBSP1 OFF */
			val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
			val &= ~(1 << 2);
			__raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
		} else if (id == DSP_CLK_MCBSP2) {
			/* clear MCBSP2_CLKS, on McBSP2 OFF */
			val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
			val &= ~(1 << 6);
			__raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
		}
	}
}
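All four branches above perform the same read-modify-write on the register at offset 0x274 and differ only in which CLKS bit they touch and whether it is set or cleared. A behavior-preserving refactor sketch folding them into one path:

/* Refactor sketch of mcbsp_clk_prepare(): pick the clock-select bit for
 * the requested McBSP, then set or clear it in one read-modify-write. */
static void mcbsp_clk_prepare(bool flag, u8 id)
{
	struct cfg_hostres *resources;
	struct dev_object *hdev_object;
	struct bridge_dev_context *bridge_context = NULL;
	u32 val, bit;

	hdev_object = (struct dev_object *)drv_get_first_dev_object();
	if (!hdev_object)
		return;

	dev_get_bridge_context(hdev_object, &bridge_context);
	if (!bridge_context || !bridge_context->resources)
		return;
	resources = bridge_context->resources;

	if (id == DSP_CLK_MCBSP1)
		bit = 1 << 2;	/* MCBSP1_CLKS */
	else if (id == DSP_CLK_MCBSP2)
		bit = 1 << 6;	/* MCBSP2_CLKS */
	else
		return;

	val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
	val = flag ? (val | bit) : (val & ~bit);
	__raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
}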
Example #5
/*
 *  ======== pwr_sleep_dsp ========
 *    Send command to DSP to enter sleep state.
 */
int pwr_sleep_dsp(const u32 sleep_code, const u32 timeout)
{
	struct bridge_drv_interface *intf_fxns;
	struct bridge_dev_context *dw_context;
	int status = -EPERM;
	struct dev_object *hdev_obj = NULL;
	u32 ioctlcode = 0;
	u32 arg = timeout;

	for (hdev_obj = (struct dev_object *)drv_get_first_dev_object();
	     hdev_obj != NULL;
	     hdev_obj =
	     (struct dev_object *)drv_get_next_dev_object((u32) hdev_obj)) {
		if (dev_get_bridge_context(hdev_obj,
						(struct bridge_dev_context **)
						   &dw_context)) {
			continue;
		}
		if (dev_get_intf_fxns(hdev_obj,
						(struct bridge_drv_interface **)
						&intf_fxns)) {
			continue;
		}
		if (sleep_code == PWR_DEEPSLEEP)
			ioctlcode = BRDIOCTL_DEEPSLEEP;
		else if (sleep_code == PWR_EMERGENCYDEEPSLEEP)
			ioctlcode = BRDIOCTL_EMERGENCYSLEEP;
		else
			status = -EINVAL;

		if (status != -EINVAL) {
			status = (*intf_fxns->pfn_dev_cntrl) (dw_context,
							      ioctlcode,
							      (void *)&arg);
		}
	}
	return status;
}
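A hypothetical caller pairing the sleep and wake commands across a system suspend; the function name and the 500-tick timeout are illustrative only, not driver API:

/* Hypothetical suspend/resume pairing for the two entry points above. */
static int dsp_pm_suspend_example(void)
{
	int status;

	status = pwr_sleep_dsp(PWR_DEEPSLEEP, 500);
	if (status)
		return status;

	/* ... system sleeps ... */

	return pwr_wake_dsp(500);
}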
Example #6
/*
 *  ======== bridge_chnl_add_io_req ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
			       u32 byte_size, u32 buf_size,
			       u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args */
	if (!host_buf || !pchnl) {
		status = -EFAULT;
	} else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
		status = -EPERM;
	} else {
		/*
		 * Check the channel state: only queue chirp if channel state
		 * allows it.
		 */
		dw_state = pchnl->dw_state;
		if (dw_state != CHNL_STATEREADY) {
			if (dw_state & CHNL_STATECANCEL)
				status = -ECANCELED;
			else if ((dw_state & CHNL_STATEEOS) &&
				 CHNL_IS_OUTPUT(pchnl->chnl_mode))
				status = -EPIPE;
			else
				/* No other possible states left */
				DBC_ASSERT(0);
		}
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (status)
		goto func_end;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
		if (!(host_buf < (void *)USERMODE_ADDR)) {
			host_sys_buf = host_buf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, host_buf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				status = -EFAULT;
				goto func_end;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size >
			    io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
				status = -EINVAL;

		}
	}
	if (!status) {
		/* Get a free chirp: */
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
		if (chnl_packet_obj == NULL)
			status = -EIO;

	}
	if (!status) {
		/* Enqueue the chirp on the chnl's IORequest queue: */
		chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		    host_buf;
		if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
			chnl_packet_obj->host_sys_buf = host_sys_buf;

		/*
		 * Note: for dma chans dw_dsp_addr contains dsp address
		 * of SM buffer.
		 */
		DBC_ASSERT(chnl_mgr_obj->word_size != 0);
		/* DSP address */
		chnl_packet_obj->dsp_tx_addr =
		    dw_dsp_addr / chnl_mgr_obj->word_size;
		chnl_packet_obj->byte_size = byte_size;
		chnl_packet_obj->buf_size = buf_size;
		/* Only valid for output channel */
		chnl_packet_obj->dw_arg = dw_arg;
		chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
					   CHNL_IOCSTATCOMPLETE);
		lst_put_tail(pchnl->pio_requests,
			     (struct list_head *)chnl_packet_obj);
		pchnl->cio_reqs++;
		DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
		/*
		 * If end of stream, update the channel state to prevent
		 * more IOR's.
		 */
		if (is_eos)
			pchnl->dw_state |= CHNL_STATEEOS;

		/* Legacy DSM Processor-Copy */
		DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
		/* Request IO from the DSP */
		io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
				(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
				 IO_OUTPUT), &mb_val);
		sched_dpc = true;

	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		sm_interrupt_dsp(dev_ctxt, mb_val);

	/* Schedule a DPC, to do the actual data transfer */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->hio_mgr);

func_end:
	return status;
}
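The user-pointer handling above (bounce a user buffer through a kernel copy, pass kernel/SM addresses straight through) distilled into a helper; chnl_get_sys_buf() is a hypothetical name:

/* Hypothetical helper distilling the bounce-buffer logic above: user
 * addresses are copied into a kernel buffer, kernel-side addresses are
 * used as-is. */
static u8 *chnl_get_sys_buf(void *host_buf, u32 buf_size, bool to_dsp,
			    int *status)
{
	u8 *sys_buf;

	if (!(host_buf < (void *)USERMODE_ADDR))
		return host_buf;	/* already kernel-side */

	sys_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!sys_buf) {
		*status = -ENOMEM;
		return NULL;
	}
	/* Only output (GPP to DSP) transfers need the data copied in now */
	if (to_dsp && copy_from_user(sys_buf, host_buf, buf_size)) {
		kfree(sys_buf);
		*status = -EFAULT;
		return NULL;
	}
	return sys_buf;
}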
Example #7
/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
			    struct chnl_ioc *chan_ioc)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (!chan_ioc || !pchnl) {
		status = -EFAULT;
	} else if (timeout == CHNL_IOCNOWAIT) {
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;

	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (status)
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (timeout !=
	    CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
		if (timeout == CHNL_IOCINFINITE)
			timeout = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set chan_ioc; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update chan_ioc from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/*  If this is a zero-copy channel, then set IOC's pbuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later. */
			{
				host_sys_buf = chnl_packet_obj->host_sys_buf;
				ioc.pbuf = chnl_packet_obj->host_user_buf;
			}
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  bridge_chnl_get_ioc. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (timeout == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      sync_set_event(pchnl->sync_event);
		 *  } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/* host_user_buf */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*chan_ioc = ioc;
func_end:
	return status;
}
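Taken together, Examples #6 and #7 are the asynchronous request/reclaim halves of a transfer. A hypothetical synchronous write built from the two entry points, assuming the CHNL_IS_IO_COMPLETE() status macro:

/* Hypothetical synchronous write: queue one buffer, then block until its
 * completion record comes back. */
static int chnl_write_sync_example(struct chnl_object *chnl, void *buf,
				   u32 bytes)
{
	struct chnl_ioc ioc;
	int status;

	status = bridge_chnl_add_io_req(chnl, buf, bytes, bytes, 0, 0);
	if (status)
		return status;

	status = bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc);
	if (status)
		return status;

	return CHNL_IS_IO_COMPLETE(ioc) ? 0 : -EIO;
}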
Example #8
int bridge_io_on_loaded(struct io_mgr *hio_mgr)
{
	struct cod_manager *cod_man;
	struct chnl_mgr *hchnl_mgr;
	struct msg_mgr *hmsg_mgr;
	u32 ul_shm_base;
	u32 ul_shm_base_offset;
	u32 ul_shm_limit;
	u32 ul_shm_length = -1;
	u32 ul_mem_length = -1;
	u32 ul_msg_base;
	u32 ul_msg_limit;
	u32 ul_msg_length = -1;
	u32 ul_ext_end;
	u32 ul_gpp_pa = 0;
	u32 ul_gpp_va = 0;
	u32 ul_dsp_va = 0;
	u32 ul_seg_size = 0;
	u32 ul_pad_size = 0;
	u32 i;
	int status = 0;
	u8 num_procs = 0;
	s32 ndx = 0;
	
	struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 map_attrs;
	u32 shm0_end;
	u32 ul_dyn_ext_base;
	u32 ul_seg1_size = 0;
	u32 pa_curr = 0;
	u32 va_curr = 0;
	u32 gpp_va_curr = 0;
	u32 num_bytes = 0;
	u32 all_bits = 0;
	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
	};

	status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	host_res = pbridge_context->resources;
	if (!host_res) {
		status = -EFAULT;
		goto func_end;
	}
	status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
	if (!cod_man) {
		status = -EFAULT;
		goto func_end;
	}
	hchnl_mgr = hio_mgr->chnl_mgr;

	/* Get the message manager for this device */
	dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
	hmsg_mgr = hio_mgr->msg_mgr;
	if (!hchnl_mgr || !hmsg_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	if (hio_mgr->shared_mem)
		hio_mgr->shared_mem = NULL;

	/* Get start and length of the channel part of shared memory */
	status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
				   &ul_shm_base);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
				   &ul_shm_limit);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	if (ul_shm_limit <= ul_shm_base) {
		status = -EINVAL;
		goto func_end;
	}
	
	/* Total length of SHM in bytes */
	ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;

	/* Log the space usable for processor-copy transfers */
	dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
		__func__, (ul_shm_length - sizeof(struct shm)));

	
	/* Get start and length of the message part of shared memory */
	status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
				   &ul_msg_base);
	if (!status) {
		status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
					   &ul_msg_limit);
		if (!status) {
			if (ul_msg_limit <= ul_msg_base) {
				status = -EINVAL;
			} else {
				ul_msg_length =
				    (ul_msg_limit - ul_msg_base +
				     1) * hio_mgr->word_size;
				ul_mem_length = ul_shm_length + ul_msg_length;
			}
		} else {
			status = -EFAULT;
		}
	} else {
		status = -EFAULT;
	}
	if (!status) {
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
		status =
		    cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
#else
		status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
					   &shm0_end);
#endif
		if (status)
			status = -EFAULT;
	}
	if (!status) {
		status =
		    cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
		if (status)
			status = -EFAULT;
	}
	if (!status) {
		status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
		if (status)
			status = -EFAULT;
	}
	if (!status) {
		
		/* Query processor information; only one DSP is expected */
		(void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
					      &hio_mgr->ext_proc_info,
					      sizeof(struct mgr_processorextinfo),
					      &num_procs);

		
		/* The first MMU TLB entry (TLB_0) in the DCD is the SHM base */
		ndx = 0;
		ul_gpp_pa = host_res->mem_phys[1];
		ul_gpp_va = host_res->mem_base[1];

		/* DSP-side virtual address of the start of SHM */
		ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
		ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
		ul_seg1_size =
		    (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
		/* 4K align the dynamic-extension segment */
		ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
		/* 64K align the SHM segment */
		ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
		ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
						    UL_PAGE_ALIGN_SIZE);
		if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
			ul_pad_size = 0x0;

		dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
			"shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
			"ul_seg_size %x ul_seg1_size %x \n", __func__,
			ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
			ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);

		if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
		    host_res->mem_length[1]) {
			pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
			       __func__, host_res->mem_length[1],
			       ul_seg_size + ul_seg1_size + ul_pad_size);
			status = -ENOMEM;
		}
	}
	if (status)
		goto func_end;

	pa_curr = ul_gpp_pa;
	va_curr = ul_dyn_ext_base * hio_mgr->word_size;
	gpp_va_curr = ul_gpp_va;
	num_bytes = ul_seg1_size;

	map_attrs = 0x00000000;
	map_attrs = DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPPHYSICALADDR;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPDONOTLOCK;

	while (num_bytes) {
		all_bits = pa_curr | va_curr;
		dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
			"num_bytes %x\n", all_bits, pa_curr, va_curr,
			num_bytes);
		for (i = 0; i < 4; i++) {
			if ((num_bytes >= page_size[i]) && ((all_bits &
							     (page_size[i] -
							      1)) == 0)) {
				status =
				    hio_mgr->intf_fxns->
				    brd_mem_map(hio_mgr->bridge_context,
						    pa_curr, va_curr,
						    page_size[i], map_attrs,
						    NULL);
				if (status)
					goto func_end;
				pa_curr += page_size[i];
				va_curr += page_size[i];
				gpp_va_curr += page_size[i];
				num_bytes -= page_size[i];
				break;
			}
		}
	}
	pa_curr += ul_pad_size;
	va_curr += ul_pad_size;
	gpp_va_curr += ul_pad_size;

	
	/* Map the SHM segment (seg0), locking TLB entries where possible */
	num_bytes = ul_seg_size;
	va_curr = ul_dsp_va * hio_mgr->word_size;
	while (num_bytes) {
		all_bits = pa_curr | va_curr;
		dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
			"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
			va_curr, num_bytes);
		for (i = 0; i < 4; i++) {
			if (!(num_bytes >= page_size[i]) ||
			    !((all_bits & (page_size[i] - 1)) == 0))
				continue;
			if (ndx < MAX_LOCK_TLB_ENTRIES) {
				/*
				 * This is the physical address written to
				 * DSP MMU.
				 */
				ae_proc[ndx].gpp_pa = pa_curr;
				ae_proc[ndx].gpp_va = gpp_va_curr;
				ae_proc[ndx].dsp_va =
				    va_curr / hio_mgr->word_size;
				ae_proc[ndx].size = page_size[i];
				ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
				ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
				ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
				dev_dbg(bridge, "shm MMU TLB entry PA %x"
					" VA %x DSP_VA %x Size %x\n",
					ae_proc[ndx].gpp_pa,
					ae_proc[ndx].gpp_va,
					ae_proc[ndx].dsp_va *
					hio_mgr->word_size, page_size[i]);
				ndx++;
			} else {
				status =
				    hio_mgr->intf_fxns->
				    brd_mem_map(hio_mgr->bridge_context,
						    pa_curr, va_curr,
						    page_size[i], map_attrs,
						    NULL);
				dev_dbg(bridge,
					"shm MMU PTE entry PA %x"
					" VA %x DSP_VA %x Size %x\n",
					pa_curr, gpp_va_curr,
					va_curr / hio_mgr->word_size,
					page_size[i]);
				if (status)
					goto func_end;
			}
			pa_curr += page_size[i];
			va_curr += page_size[i];
			gpp_va_curr += page_size[i];
			num_bytes -= page_size[i];
			break;
		}
	}

	for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
		if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
			continue;

		if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
		     ul_gpp_pa - 0x100000
		     && hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
		     ul_gpp_pa + ul_seg_size)
		    || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
			ul_dsp_va - 0x100000 / hio_mgr->word_size
			&& hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt <=
			ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
			dev_dbg(bridge,
				"CDB MMU entry %d conflicts with "
				"shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
				"GppPa %x, DspVa %x, Bytes %x.\n", i,
				hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
				hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
				ul_gpp_pa, ul_dsp_va, ul_seg_size);
			status = -EPERM;
		} else {
			if (ndx < MAX_LOCK_TLB_ENTRIES) {
				ae_proc[ndx].dsp_va =
				    hio_mgr->ext_proc_info.ty_tlb[i].
				    dsp_virt;
				ae_proc[ndx].gpp_pa =
				    hio_mgr->ext_proc_info.ty_tlb[i].
				    gpp_phys;
				ae_proc[ndx].gpp_va = 0;
				
				/* 1 MB entry from the configuration database */
				ae_proc[ndx].size = 0x100000;
				dev_dbg(bridge, "shm MMU entry PA %x "
					"DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa,
					ae_proc[ndx].dsp_va);
				ndx++;
			} else {
				status = hio_mgr->intf_fxns->brd_mem_map
				    (hio_mgr->bridge_context,
				     hio_mgr->ext_proc_info.ty_tlb[i].
				     gpp_phys,
				     hio_mgr->ext_proc_info.ty_tlb[i].
				     dsp_virt, 0x100000, map_attrs,
				     NULL);
			}
		}
		if (status)
			goto func_end;
	}

	map_attrs = 0x00000000;
	map_attrs = DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPPHYSICALADDR;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPDONOTLOCK;

	
	/* Map the L4 peripherals the DSP needs to access */
	i = 0;
	while (l4_peripheral_table[i].phys_addr) {
		status = hio_mgr->intf_fxns->brd_mem_map
		    (hio_mgr->bridge_context, l4_peripheral_table[i].phys_addr,
		     l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
		     map_attrs, NULL);
		if (status)
			goto func_end;
		i++;
	}

	for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
		ae_proc[i].dsp_va = 0;
		ae_proc[i].gpp_pa = 0;
		ae_proc[i].gpp_va = 0;
		ae_proc[i].size = 0;
	}
	/* TLB entry 0's "physical" address is really the uncached,
	 * ioremapped GPP virtual address of the SHM reservation */
	hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size);

	if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
		status = -EFAULT;
		goto func_end;
	} else {
		if (ae_proc[0].dsp_va > ul_shm_base) {
			status = -EPERM;
			goto func_end;
		}
		
		/* Byte offset of the SHM base within TLB entry 0 */
		ul_shm_base_offset = (ul_shm_base - ae_proc[0].dsp_va) *
		    hio_mgr->word_size;

		status =
		    hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
						      BRDIOCTL_SETMMUCONFIG,
						      ae_proc);
		if (status)
			goto func_end;
		ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
		ul_shm_base += ul_shm_base_offset;
		ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
						       ul_mem_length);
		if (ul_shm_base == 0) {
			status = -EFAULT;
			goto func_end;
		}
		
		/* Register the SHM segments with the CMM */
		status =
		    register_shm_segs(hio_mgr, cod_man, ae_proc[0].gpp_pa);
	}

	hio_mgr->shared_mem = (struct shm *)ul_shm_base;
	hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
	hio_mgr->output = hio_mgr->input + (ul_shm_length -
					    sizeof(struct shm)) / 2;
	hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;

	
	/* Carve the message rings out of the end of shared memory */
	hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
						      + ul_shm_length);
	hio_mgr->msg_input =
	    (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
	hio_mgr->msg_output_ctrl =
	    (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
				ul_msg_length / 2);
	hio_mgr->msg_output =
	    (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
	hmsg_mgr->max_msgs =
	    ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
	    / sizeof(struct msg_dspmsg);
	dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
		"output %p, msg_input_ctrl %p, msg_input %p, "
		"msg_output_ctrl %p, msg_output %p\n",
		(u8 *) hio_mgr->shared_mem, hio_mgr->input,
		hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
		hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
		hio_mgr->msg_output);
	dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n",
		hmsg_mgr->max_msgs);
	memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));

#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
	
	/* Get the start symbol of the DSP trace buffer */
	status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
				   &hio_mgr->trace_buffer_begin);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}

	hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	    (hio_mgr->trace_buffer_begin - ul_dsp_va);
	
	/* Get the end symbol of the DSP trace buffer */
	status = cod_get_sym_value(cod_man, SYS_PUTCEND,
				   &hio_mgr->trace_buffer_end);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	hio_mgr->trace_buffer_end =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	    (hio_mgr->trace_buffer_end - ul_dsp_va);
	
	/* Get the current write pointer of the DSP trace buffer */
	status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
				   &hio_mgr->trace_buffer_current);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	hio_mgr->trace_buffer_current =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	    (hio_mgr->trace_buffer_current - ul_dsp_va);
	
	/* (Re)allocate the kernel buffer used to copy out trace data */
	kfree(hio_mgr->msg);
	hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
				hio_mgr->trace_buffer_begin) *
				hio_mgr->word_size) + 2, GFP_KERNEL);
	if (!hio_mgr->msg)
		status = -ENOMEM;

	hio_mgr->dsp_va = ul_dsp_va;
	hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);

#endif
func_end:
	return status;
}
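Both mapping loops in bridge_io_on_loaded() implement the same greedy policy: map with the largest MMU page whose size fits the remaining length and whose alignment divides both the current physical and virtual addresses. The selection step distilled (pick_page_size() is a hypothetical name):

/* Hypothetical helper distilling the page-size selection used above. */
static u32 pick_page_size(u32 pa, u32 va, u32 num_bytes)
{
	static const u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
					 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB };
	u32 all_bits = pa | va;
	int i;

	for (i = 0; i < 4; i++)
		if (num_bytes >= page_size[i] &&
		    (all_bits & (page_size[i] - 1)) == 0)
			return page_size[i];

	return 0;	/* nothing fits: addresses not even 4 KB aligned */
}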
Example #9
int bridge_io_create(struct io_mgr **io_man,
			    struct dev_object *hdev_obj,
			    const struct io_attrs *mgr_attrts)
{
	struct io_mgr *pio_mgr = NULL;
	struct bridge_dev_context *hbridge_context = NULL;
	struct cfg_devnode *dev_node_obj;
	struct chnl_mgr *hchnl_mgr;
	u8 dev_type;

	
	/* Check requirements */
	if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0)
		return -EFAULT;

	*io_man = NULL;

	dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
	if (!hchnl_mgr || hchnl_mgr->iomgr)
		return -EFAULT;

	dev_get_bridge_context(hdev_obj, &hbridge_context);
	if (!hbridge_context)
		return -EFAULT;

	dev_get_dev_type(hdev_obj, &dev_type);

	
	/* Allocate IO manager object */
	pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
	if (!pio_mgr)
		return -ENOMEM;

	
	/* Initialize chnl_mgr object */
	pio_mgr->chnl_mgr = hchnl_mgr;
	pio_mgr->word_size = mgr_attrts->word_size;

	if (dev_type == DSP_UNIT) {
		/* Create an IO DPC */
		tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);

		/* Initialize DPC counters */
		pio_mgr->dpc_req = 0;
		pio_mgr->dpc_sched = 0;

		spin_lock_init(&pio_mgr->dpc_lock);

		if (dev_get_dev_node(hdev_obj, &dev_node_obj)) {
			bridge_io_destroy(pio_mgr);
			return -EIO;
		}
	}

	pio_mgr->bridge_context = hbridge_context;
	pio_mgr->shared_irq = mgr_attrts->irq_shared;
	if (dsp_wdt_init()) {
		bridge_io_destroy(pio_mgr);
		return -EPERM;
	}

	
	/* Return IO manager object to caller */
	hchnl_mgr->iomgr = pio_mgr;
	*io_man = pio_mgr;

	return 0;
}
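The matching teardown releases what bridge_io_create() set up, in reverse order; a minimal sketch (the driver's real bridge_io_destroy() may free additional state such as the trace buffer):

/* Sketch of the reverse teardown for bridge_io_create(). */
int bridge_io_destroy_sketch(struct io_mgr *pio_mgr)
{
	if (!pio_mgr)
		return -EFAULT;

	/* Stop the IO DPC before freeing the object it references */
	tasklet_kill(&pio_mgr->dpc_tasklet);

	dsp_wdt_exit();
	kfree(pio_mgr);
	return 0;
}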
Example #10
/*
 *  ======== node_allocate ========
 *  Purpose:
 *      Allocate GPP resources to manage a node on the DSP.
 */
int node_allocate(struct proc_object *hprocessor,
			const struct dsp_uuid *node_uuid,
			const struct dsp_cbdata *pargs,
			const struct dsp_nodeattrin *attr_in,
			struct node_res_object **noderes,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_addr, ul_stack_seg_val;
	u32 ul_gpp_mem_base;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hprocessor != NULL);
	DBC_REQUIRE(noderes != NULL);
	DBC_REQUIRE(node_uuid != NULL);

	*noderes = NULL;

	status = proc_get_processor_id(hprocessor, &proc_id);

	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;

	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->ul_fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (status)
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->hnode_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->hprocessor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support node dynamic heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* check for page aligned Heap size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
					task_arg_obj.udsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->utimeout = attr_in->utimeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/*  Allocate dsp_streamconnect array for device, task, and
		 *  dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;

		}
		if (!status && (node_type == NODE_TASK ||
					      node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintaining stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
						       ptask_args->strm_in_def
						       == NULL))
			    || (MAX_OUTPUTS(pnode) > 0
				&& (pnode->outputs == NULL
				    || ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
								GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/* Get the shared mem mgr for this node's dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * w/ deflt attr */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
								GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
							&pnode->msg_queue_obj,
							0,
							pnode->create_args.asa.
							node_msg_args.max_msgs,
							pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */

		status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
							   (void *)pnode,
							   &pnode->dcd_props.
							   obj_data.node_obj,
							   &pnode->
							   nldr_node_obj,
							   &pnode->phase_split);
	}

	/* If the stack-segment name from the node properties matches
	 * STACKSEGLABEL, look up the DYNEXT_BEG and L1DSRAM_HEAP symbols,
	 * compute the corresponding GPP address, read the value stored
	 * there, and override stack_seg in the task args */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					     &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj,
					     "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
			off_set = pul_value - dynext_base;
			ul_stack_seg_addr = ul_gpp_mem_base + off_set;
			ul_stack_seg_val = readl(ul_stack_seg_addr);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				ul_stack_seg_addr);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (!status) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		lst_init_elem((struct list_head *)pnode);
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		lst_put_tail(hnode_mgr->node_list, (struct list_head *)pnode);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);

	}

	if (!status) {
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
	DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}
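If the heap setup in node_allocate() ever needs to be unwound by hand, reservation and mapping come apart in reverse order; a sketch assuming the driver's proc_un_map()/proc_un_reserve_memory() counterparts (delete_node() is expected to do the equivalent):

/* Hypothetical cleanup pairing for the node heap: unmap first, then
 * release the address reservation. */
static void node_heap_teardown_sketch(struct proc_object *proc,
				      struct node_object *pnode,
				      struct process_context *pr_ctxt)
{
	struct node_taskargs *args = &pnode->create_args.asa.task_arg_obj;

	if (args->udsp_heap_addr)
		proc_un_map(proc, (void *)args->udsp_heap_addr, pr_ctxt);
	if (args->udsp_heap_res_addr)
		proc_un_reserve_memory(proc,
				       (void *)args->udsp_heap_res_addr,
				       pr_ctxt);
}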