Example 1
int bridge_deh_create(struct deh_mgr **ret_deh,
		struct dev_object *hdev_obj)
{
	int status;
	struct deh_mgr *deh;
	struct bridge_dev_context *hbridge_context = NULL;

	/*  Message manager will be created when a file is loaded, since
	 *  size of message buffer in shared memory is configurable in
	 *  the base image. */
	/* Get Bridge context info. */
	dev_get_bridge_context(hdev_obj, &hbridge_context);
	/* Allocate DEH manager object: */
	deh = kzalloc(sizeof(*deh), GFP_KERNEL);
	if (!deh) {
		status = -ENOMEM;
		goto err;
	}

	/* Create an NTFY object to manage notifications */
	deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!deh->ntfy_obj) {
		status = -ENOMEM;
		goto err;
	}
	ntfy_init(deh->ntfy_obj);

	/* Create a MMUfault DPC */
	tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (unsigned long) deh);

	/* Fill in context structure */
	deh->bridge_context = hbridge_context;

	/* Install ISR function for DSP MMU fault */
	status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
			"DspBridge\tiommu fault", deh);
	if (status < 0)
		goto err;

	*ret_deh = deh;
	return 0;

err:
	bridge_deh_destroy(deh);
	*ret_deh = NULL;
	return status;
}
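Note the single err label above: every failure funnels through one cleanup site. This only works because bridge_deh_destroy has to tolerate a NULL or partially initialized deh (the first kzalloc failure reaches err with deh still NULL, and later failures reach it with no IRQ installed). A minimal sketch of the same unwinding idiom, where setup_a, setup_b, and destroy_obj are hypothetical stand-ins, not functions from this listing:

int create_obj(struct obj **out)
{
	struct obj *o;
	int status;

	o = kzalloc(sizeof(*o), GFP_KERNEL);
	if (!o)
		return -ENOMEM;

	status = setup_a(o);	/* hypothetical first init step */
	if (status)
		goto err;

	status = setup_b(o);	/* hypothetical second init step */
	if (status)
		goto err;

	*out = o;
	return 0;

err:
	destroy_obj(o);		/* must cope with partially set-up state */
	*out = NULL;
	return status;
}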
Example 2
/*
 *  ======== bridge_chnl_open ========
 *      Open a new half-duplex channel to the DSP board.
 */
int bridge_chnl_open(OUT struct chnl_object **phChnl,
			    struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
			    u32 uChnlId, CONST IN struct chnl_attr *pattrs)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	struct chnl_object *pchnl = NULL;
	struct sync_object *sync_event = NULL;
	/* Ensure DBC requirements: */
	DBC_REQUIRE(phChnl != NULL);
	DBC_REQUIRE(pattrs != NULL);
	DBC_REQUIRE(hchnl_mgr != NULL);
	*phChnl = NULL;

	/* Validate Args: */
	if (pattrs->uio_reqs == 0) {
		status = -EINVAL;
	} else if (!hchnl_mgr) {
		status = -EFAULT;
	} else if (uChnlId != CHNL_PICKFREE) {
		if (uChnlId >= chnl_mgr_obj->max_channels)
			status = -ECHRNG;
		else if (chnl_mgr_obj->ap_channel[uChnlId] != NULL)
			status = -EALREADY;
	} else {
		/* Check for free channel */
		status = search_free_channel(chnl_mgr_obj, &uChnlId);
	}
	if (DSP_FAILED(status))
		goto func_end;

	DBC_ASSERT(uChnlId < chnl_mgr_obj->max_channels);
	/* Create channel object: */
	pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
	if (!pchnl) {
		status = -ENOMEM;
		goto func_end;
	}
	/* Protect queues from io_dpc: */
	pchnl->dw_state = CHNL_STATECANCEL;
	/* Allocate initial IOR and IOC queues: */
	pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
	pchnl->pio_requests = create_chirp_list(0);
	pchnl->pio_completions = create_chirp_list(0);
	pchnl->chnl_packets = pattrs->uio_reqs;
	pchnl->cio_cs = 0;
	pchnl->cio_reqs = 0;
	sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (sync_event)
		sync_init_event(sync_event);
	else
		status = -ENOMEM;

	if (DSP_SUCCEEDED(status)) {
		pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (pchnl->ntfy_obj)
			ntfy_init(pchnl->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		if (pchnl->pio_completions && pchnl->pio_requests &&
		    pchnl->free_packets_list) {
			/* Initialize CHNL object fields: */
			pchnl->chnl_mgr_obj = chnl_mgr_obj;
			pchnl->chnl_id = uChnlId;
			pchnl->chnl_mode = chnl_mode;
			pchnl->user_event = sync_event;	/* for Linux */
			pchnl->sync_event = sync_event;
			/* Get the process handle */
			pchnl->process = current->tgid;
			pchnl->pcb_arg = 0;
			pchnl->bytes_moved = 0;
			/* Default to proc-copy */
			pchnl->chnl_type = CHNL_PCPY;
		} else {
			status = -ENOMEM;
		}
	}

	if (DSP_FAILED(status)) {
		/* Free memory */
		if (pchnl->pio_completions) {
			free_chirp_list(pchnl->pio_completions);
			pchnl->pio_completions = NULL;
			pchnl->cio_cs = 0;
		}
		if (pchnl->pio_requests) {
			free_chirp_list(pchnl->pio_requests);
			pchnl->pio_requests = NULL;
		}
		if (pchnl->free_packets_list) {
			free_chirp_list(pchnl->free_packets_list);
			pchnl->free_packets_list = NULL;
		}
		kfree(sync_event);
		sync_event = NULL;

		if (pchnl->ntfy_obj) {
			ntfy_delete(pchnl->ntfy_obj);
			kfree(pchnl->ntfy_obj);
			pchnl->ntfy_obj = NULL;
		}
		kfree(pchnl);
	} else {
		/* Insert channel object in channel manager: */
		chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
		spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		chnl_mgr_obj->open_channels++;
		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		/* Return result... */
		pchnl->dw_state = CHNL_STATEREADY;
		*phChnl = pchnl;
	}
func_end:
	DBC_ENSURE((DSP_SUCCEEDED(status) && pchnl) || (*phChnl == NULL));
	return status;
}
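A hedged caller sketch for the function above, letting the manager pick a free slot. CHNL_MODETODSP and the request depth of 16 are assumptions, not taken from this listing:

	struct chnl_object *chnl;
	struct chnl_attr attrs = { .uio_reqs = 16 };	/* assumed depth */
	int status;

	status = bridge_chnl_open(&chnl, hchnl_mgr, CHNL_MODETODSP,
				  CHNL_PICKFREE, &attrs);
	if (status)
		return status;

Since uio_reqs == 0 is rejected with -EINVAL, the attribute block must always request at least one I/O packet.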
Example 3
/*
 *  ======== bridge_msg_create_queue ========
 *      Create a msg_queue for sending/receiving messages to/from a node
 *      on the DSP.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
				OUT struct msg_queue **phMsgQueue,
				u32 msgq_id, u32 max_msgs, bhandle arg)
{
	u32 i;
	u32 num_allocated = 0;
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || phMsgQueue == NULL || !hmsg_mgr->msg_free_list) {
		status = -EFAULT;
		goto func_end;
	}

	*phMsgQueue = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q) {
		status = -ENOMEM;
		goto func_end;
	}
	lst_init_elem((struct list_head *)msg_q);
	msg_q->max_msgs = max_msgs;
	msg_q->hmsg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of Message frames for messages from the DSP */
	msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL) {
		status = -ENOMEM;
	} else {
		INIT_LIST_HEAD(&msg_q->msg_free_list->head);
		INIT_LIST_HEAD(&msg_q->msg_used_list->head);
	}

	/*  Create event that will be signalled when a message from
	 *  the DSP is available. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_event = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_event)
			sync_init_event(msg_q->sync_event);
		else
			status = -ENOMEM;
	}

	/* Create a notification list for message ready notification. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (msg_q->ntfy_obj)
			ntfy_init(msg_q->ntfy_obj);
		else
			status = -ENOMEM;
	}

	/*  Create events that will be used to synchronize cleanup
	 *  when the object is deleted. sync_done will be set to
	 *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
	 *  will be set by the unblocked thread to signal that it
	 *  is unblocked and will no longer reference the object. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_done)
			sync_init_event(msg_q->sync_done);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_done_ack)
			sync_init_event(msg_q->sync_done_ack);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Initialize message frames and put in appropriate queues */
		for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) {
			status = add_new_msg(hmsg_mgr->msg_free_list);
			if (DSP_SUCCEEDED(status)) {
				num_allocated++;
				status = add_new_msg(msg_q->msg_free_list);
			}
		}
		if (DSP_FAILED(status)) {
			/*  Stay inside CS to prevent others from taking any
			 *  of the newly allocated message frames. */
			delete_msg_queue(msg_q, num_allocated);
		} else {
			lst_put_tail(hmsg_mgr->queue_list,
				     (struct list_head *)msg_q);
			*phMsgQueue = msg_q;
			/* Signal that free frames are now available */
			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
				sync_set_event(hmsg_mgr->sync_event);
		}
		/* Exit critical section */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	} else {
		delete_msg_queue(msg_q, 0);
	}
func_end:
	return status;
}
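The DSP_SUCCEEDED()/DSP_FAILED() ladder above treats any negative status as failure. A sketch of how these legacy macros are conventionally defined; this is an assumption based on their use here, not something shown in the listing:

#define DSP_SUCCEEDED(status)	(((int)(status)) >= 0)
#define DSP_FAILED(status)	(((int)(status)) < 0)

Example 5 below drops the ladder in favor of plain !status tests and goto-based unwinding, which is why its version of the same function reads much flatter.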
Example 4
/*
 *  ======== bridge_deh_create ========
 *      Creates DEH manager object.
 */
int bridge_deh_create(OUT struct deh_mgr **phDehMgr,
			     struct dev_object *hdev_obj)
{
	int status = 0;
	struct deh_mgr *deh_mgr_obj = NULL;
	struct wmd_dev_context *hwmd_context = NULL;

	/*  Message manager will be created when a file is loaded, since
	 *  size of message buffer in shared memory is configurable in
	 *  the base image. */
	/* Get WMD context info. */
	dev_get_wmd_context(hdev_obj, &hwmd_context);
	DBC_ASSERT(hwmd_context);
	dummy_va_addr = 0;
	/* Allocate DEH manager object: */
	deh_mgr_obj = kzalloc(sizeof(struct deh_mgr), GFP_KERNEL);
	if (deh_mgr_obj == NULL) {
		status = -ENOMEM;
	} else {
		/* Create an NTFY object to manage notifications */
		deh_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (deh_mgr_obj->ntfy_obj)
			ntfy_init(deh_mgr_obj->ntfy_obj);
		else
			status = -ENOMEM;

		deh_mgr_obj->mmu_wq = create_workqueue("dsp-mmu_wq");
		if (!deh_mgr_obj->mmu_wq)
			status = -ENOMEM;

		INIT_WORK(&deh_mgr_obj->fault_work, mmu_fault_work);

		if (DSP_SUCCEEDED(status)) {
			/* Fill in context structure */
			deh_mgr_obj->hwmd_context = hwmd_context;
			deh_mgr_obj->err_info.dw_err_mask = 0L;
			deh_mgr_obj->err_info.dw_val1 = 0L;
			deh_mgr_obj->err_info.dw_val2 = 0L;
			deh_mgr_obj->err_info.dw_val3 = 0L;
			/* Install ISR function for DSP MMU fault */
			status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
					     "DspBridge\tiommu fault",
					     (void *)deh_mgr_obj);
			if (status < 0)
				status = -EPERM;
		}
	}
	if (DSP_FAILED(status)) {
		/* If create failed, cleanup */
		bridge_deh_destroy((struct deh_mgr *)deh_mgr_obj);
		*phDehMgr = NULL;
	} else {
		timer = omap_dm_timer_request_specific(
					GPTIMER_FOR_DSP_MMU_FAULT);
		if (timer) {
			omap_dm_timer_disable(timer);
		} else {
			pr_err("%s: GPTimer not available\n", __func__);
			/* Don't leak the DEH manager on this late failure */
			bridge_deh_destroy((struct deh_mgr *)deh_mgr_obj);
			*phDehMgr = NULL;
			return -ENODEV;
		}
		*phDehMgr = (struct deh_mgr *)deh_mgr_obj;
	}

	return status;
}
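Unlike Example 1, which runs its MMU-fault DPC as a tasklet, this version defers fault handling to a dedicated workqueue so the handler can sleep. A minimal sketch of that deferral pattern, assuming the handler recovers its deh_mgr via container_of; the handler body is not shown in this listing:

static void mmu_fault_work(struct work_struct *work)
{
	struct deh_mgr *deh_mgr_obj =
		container_of(work, struct deh_mgr, fault_work);

	/* Runs in process context, so it may sleep while it dumps
	 * MMU state and notifies registered clients. */
}

/* The ISR then defers the heavy lifting: */
queue_work(deh_mgr_obj->mmu_wq, &deh_mgr_obj->fault_work);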
Example 5
/*
 *  ======== bridge_msg_create_queue ========
 *      Create a msg_queue for sending/receiving messages to/from a node
 *      on the DSP.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
				u32 msgq_id, u32 max_msgs, void *arg)
{
	u32 i;
	u32 num_allocated = 0;
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || msgq == NULL)
		return -EFAULT;

	*msgq = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q)
		return -ENOMEM;

	msg_q->max_msgs = max_msgs;
	msg_q->msg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of Message frames for messages from the DSP */
	INIT_LIST_HEAD(&msg_q->msg_free_list);
	INIT_LIST_HEAD(&msg_q->msg_used_list);

	/*  Create event that will be signalled when a message from
	 *  the DSP is available. */
	msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_event) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_event);

	/* Create a notification list for message ready notification. */
	msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!msg_q->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(msg_q->ntfy_obj);

	/*  Create events that will be used to synchronize cleanup
	 *  when the object is deleted. sync_done will be set to
	 *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
	 *  will be set by the unblocked thread to signal that it
	 *  is unblocked and will no longer reference the object. */
	msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done);

	msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done_ack) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done_ack);

	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* Initialize message frames and put in appropriate queues */
	for (i = 0; i < max_msgs && !status; i++) {
		status = add_new_msg(&hmsg_mgr->msg_free_list);
		if (!status) {
			num_allocated++;
			status = add_new_msg(&msg_q->msg_free_list);
		}
	}
	if (status) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		goto out_err;
	}

	list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
	*msgq = msg_q;
	/* Signal that free frames are now available */
	if (!list_empty(&hmsg_mgr->msg_free_list))
		sync_set_event(hmsg_mgr->sync_event);

	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	return 0;
out_err:
	delete_msg_queue(msg_q, num_allocated);
	return status;
}
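For reference, a hedged caller sketch; the msgq_id of 0 and a max_msgs value taken from the node's message args mirror the call site shown in Example 6 below:

	struct msg_queue *msg_queue_obj;
	int status;

	status = bridge_msg_create_queue(hmsg_mgr, &msg_queue_obj, 0,
					 max_msgs, (void *)pnode);
	if (status)
		return status;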
Example 6
/*
 *  ======== node_allocate ========
 *  Purpose:
 *      Allocate GPP resources to manage a node on the DSP.
 */
int node_allocate(struct proc_object *hprocessor,
			const struct dsp_uuid *node_uuid,
			const struct dsp_cbdata *pargs,
			const struct dsp_nodeattrin *attr_in,
			struct node_res_object **noderes,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_addr, ul_stack_seg_val;
	u32 ul_gpp_mem_base;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hprocessor != NULL);
	DBC_REQUIRE(noderes != NULL);
	DBC_REQUIRE(node_uuid != NULL);

	*noderes = NULL;

	status = proc_get_processor_id(hprocessor, &proc_id);
	if (status)
		goto func_end;

	if (proc_id != DSP_UNIT) {
		/* Fail explicitly so *noderes is NULL only alongside a
		 * failed status, as the DBC_ENSURE below expects. */
		status = -EINVAL;
		goto func_end;
	}

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;
	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->ul_fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (status)
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->hnode_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->hprocessor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support node dynamic heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* check for page aligned Heap size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
					task_arg_obj.udsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->utimeout = attr_in->utimeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/*  Allocate dsp_streamconnect array for device, task, and
		 *  dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;
		}
		if (!status && (node_type == NODE_TASK ||
					      node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintainig stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 &&
			     (pnode->inputs == NULL ||
			      ptask_args->strm_in_def == NULL)) ||
			    (MAX_OUTPUTS(pnode) > 0 &&
			     (pnode->outputs == NULL ||
			      ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
								GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/*Get the shared mem mgr for this nodes dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * w/ deflt attr */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
								GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
							&pnode->msg_queue_obj,
							0,
							pnode->create_args.asa.
							node_msg_args.max_msgs,
							pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */

		status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
							   (void *)pnode,
							   &pnode->dcd_props.
							   obj_data.node_obj,
							   &pnode->
							   nldr_node_obj,
							   &pnode->phase_split);
	}

	/* Compare value read from Node Properties and check if it is same as
	 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
	 * GPP Address, Read the value in that address and override the
	 * stack_seg value in task args */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					     &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj,
					     "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
			off_set = pul_value - dynext_base;
			ul_stack_seg_addr = ul_gpp_mem_base + off_set;
			ul_stack_seg_val = readl(ul_stack_seg_addr);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				ul_stack_seg_addr);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (!status) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		lst_init_elem((struct list_head *)pnode);
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);
	}

	if (!status) {
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
	DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p node_uuid: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}
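A hedged caller sketch for node_allocate: per the checks above, pargs and attr_in may be NULL, which skips the message-args copy and the attribute overrides. The dsp_uuid value is a placeholder:

	struct node_res_object *node_res_obj;
	int status;

	status = node_allocate(hprocessor, &node_uuid, NULL, NULL,
			       &node_res_obj, pr_ctxt);
	if (status)
		return status;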