Example 1
/*
 *  ======== delete_node ========
 *  Purpose:
 *      Return a memory node to the CMM manager's free list for later
 *      reuse. Does not actually free the node; recycling nodes avoids
 *      heap thrashing.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	DBC_REQUIRE(pnode != NULL);
	lst_init_elem((struct list_head *)pnode);	/* init .self ptr */
	lst_put_tail(cmm_mgr_obj->node_free_list_head,
		     (struct list_head *)pnode);
}
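A minimal sketch of the same recycling idea using the mainline list_head API; the struct, member names, and free list below are assumptions for illustration, not part of the driver:

/* Hypothetical node type with an embedded list element. */
struct my_node {
	struct list_head link;
	u32 dw_pa;
};

static LIST_HEAD(my_free_list);	/* assumed free list; caller provides locking */

static void my_delete_node(struct my_node *pnode)
{
	/* Reinitialize the element, then park it on the free list. */
	INIT_LIST_HEAD(&pnode->link);
	list_add_tail(&pnode->link, &my_free_list);
}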
Example 2
/*
 *  ======== get_node ========
 *  Purpose:
 *      Get a memory node from freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode = NULL;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE(dw_pa != 0);
	DBC_REQUIRE(dw_va != 0);
	DBC_REQUIRE(ul_size != 0);
	/* Check cmm mgr's node freelist */
	if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
	} else {
		/* free list is non-empty, so this returns a valid element */
		pnode = (struct cmm_mnode *)
		    lst_get_head(cmm_mgr_obj->node_free_list_head);
	}
	if (pnode) {
		lst_init_elem((struct list_head *)pnode);	/* set self */
		pnode->dw_pa = dw_pa;	/* Physical addr of start of block */
	pnode->dw_va = dw_va;	/* Virtual address of start of block */
		pnode->ul_size = ul_size;	/* Size of block */
	}
	return pnode;
}
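The same pop-or-allocate pattern expressed with the standard list API, reusing the hypothetical struct my_node from the sketch above (illustrative only):

static struct my_node *my_get_node(struct list_head *free_list, u32 dw_pa)
{
	struct my_node *pnode;

	if (list_empty(free_list)) {
		pnode = kzalloc(sizeof(*pnode), GFP_KERNEL);
		if (!pnode)
			return NULL;
	} else {
		pnode = list_first_entry(free_list, struct my_node, link);
		list_del_init(&pnode->link);	/* detach and reinitialize */
	}
	pnode->dw_pa = dw_pa;	/* physical address of block start */
	return pnode;
}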
Example 3
/*
 *  ======== make_new_chirp ========
 *      Allocate the memory for a new channel IRP.
 */
static struct chnl_irp *make_new_chirp(void)
{
	struct chnl_irp *chnl_packet_obj;

	chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
	if (chnl_packet_obj != NULL) {
		/* lst_init_elem only resets the list's member values. */
		lst_init_elem(&chnl_packet_obj->link);
	}

	return chnl_packet_obj;
}
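A hypothetical caller, shown only to illustrate the contract (NULL on allocation failure, link element already initialized); 'free_chirp_list' is an assumed name:

	struct chnl_irp *chirp;

	chirp = make_new_chirp();
	if (chirp == NULL)
		return -ENOMEM;
	/* The link element is initialized, so the IRP can be queued directly. */
	lst_put_tail(free_chirp_list, &chirp->link);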
Example 4
/*
 *  ======== add_new_msg ========
 *      Must be called within the message manager's critical section.
 */
static int add_new_msg(struct lst_list *msgList)
{
	struct msg_frame *pmsg;
	int status = 0;

	pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
	if (pmsg != NULL) {
		lst_init_elem((struct list_head *)pmsg);
		lst_put_tail(msgList, (struct list_head *)pmsg);
	} else {
		status = -ENOMEM;
	}

	return status;
}
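GFP_ATOMIC is used because this routine runs inside the message manager's spinlock-protected critical section (see Example 6); GFP_KERNEL could sleep while the lock is held. The caller shape, taken from Example 6:

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	status = add_new_msg(hmsg_mgr->msg_free_list);
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);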
Example 5
/*
 *  ======== rmm_alloc ========
 */
int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
		     u32 align, u32 *dsp_address, bool reserve)
{
	struct rmm_ovly_sect *sect;
	struct rmm_ovly_sect *prev_sect = NULL;
	struct rmm_ovly_sect *new_sect;
	u32 addr;
	int status = 0;

	DBC_REQUIRE(target);
	DBC_REQUIRE(dsp_address != NULL);
	DBC_REQUIRE(size > 0);
	DBC_REQUIRE(reserve || (target->num_segs > 0));
	DBC_REQUIRE(refs > 0);

	if (!reserve) {
		if (!alloc_block(target, segid, size, align, dsp_address)) {
			status = -ENOMEM;
		} else {
			/* Increment the number of allocated blocks in this
			 * segment */
			target->seg_tab[segid].number++;
		}
		goto func_end;
	}
	/* An overlay section - see if the block is already in use. If not,
	 * insert it into the list in ascending address order. */
	addr = *dsp_address;
	sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
	/*  Find place to insert new list element. List is sorted from
	 *  smallest to largest address. */
	while (sect != NULL) {
		if (addr <= sect->addr) {
			/* Check for overlap with sect */
			if ((addr + size > sect->addr) || (prev_sect &&
							   (prev_sect->addr +
							    prev_sect->size >
							    addr))) {
				status = -ENXIO;
			}
			break;
		}
		prev_sect = sect;
		sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
							(struct list_head *)
							sect);
	}
	if (!status) {
		/* No overlap - allocate list element for new section. */
		new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
		if (new_sect == NULL) {
			status = -ENOMEM;
		} else {
			lst_init_elem((struct list_head *)new_sect);
			new_sect->addr = addr;
			new_sect->size = size;
			new_sect->page = segid;
			if (sect == NULL) {
				/* Put new section at the end of the list */
				lst_put_tail(target->ovly_list,
					     (struct list_head *)new_sect);
			} else {
				/* Put new section just before sect */
				lst_insert_before(target->ovly_list,
						  (struct list_head *)new_sect,
						  (struct list_head *)sect);
			}
		}
	}
func_end:
	return status;
}
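The overlap test inside the loop is the standard half-open interval check; a self-contained restatement (illustrative only, not part of the driver):

/* [a, a + a_size) and [b, b + b_size) overlap iff each range starts
 * before the other one ends. Because ovly_list is sorted by address,
 * rmm_alloc() only needs to test the new range against its successor
 * (sect) and its predecessor (prev_sect). */
static bool ranges_overlap(u32 a, u32 a_size, u32 b, u32 b_size)
{
	return a < b + b_size && b < a + a_size;
}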
Example 6
/*
 *  ======== bridge_msg_create_queue ========
 *      Create a msg_queue for sending/receiving messages to/from a node
 *      on the DSP.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
				OUT struct msg_queue **phMsgQueue,
				u32 msgq_id, u32 max_msgs, bhandle arg)
{
	u32 i;
	u32 num_allocated = 0;
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || phMsgQueue == NULL || !hmsg_mgr->msg_free_list) {
		status = -EFAULT;
		goto func_end;
	}

	*phMsgQueue = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q) {
		status = -ENOMEM;
		goto func_end;
	}
	lst_init_elem((struct list_head *)msg_q);
	msg_q->max_msgs = max_msgs;
	msg_q->hmsg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of Message frames for messages from the DSP */
	msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
		status = -ENOMEM;
	else {
		INIT_LIST_HEAD(&msg_q->msg_free_list->head);
		INIT_LIST_HEAD(&msg_q->msg_used_list->head);
	}

	/*  Create event that will be signalled when a message from
	 *  the DSP is available. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_event = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_event)
			sync_init_event(msg_q->sync_event);
		else
			status = -ENOMEM;
	}

	/* Create a notification list for message ready notification. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (msg_q->ntfy_obj)
			ntfy_init(msg_q->ntfy_obj);
		else
			status = -ENOMEM;
	}

	/*  Create events that will be used to synchronize cleanup
	 *  when the object is deleted. sync_done will be set to
	 *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
	 *  will be set by the unblocked thread to signal that it
	 *  is unblocked and will no longer reference the object. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_done)
			sync_init_event(msg_q->sync_done);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_done_ack)
			sync_init_event(msg_q->sync_done_ack);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Initialize message frames and put in appropriate queues */
		for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) {
			status = add_new_msg(hmsg_mgr->msg_free_list);
			if (DSP_SUCCEEDED(status)) {
				num_allocated++;
				status = add_new_msg(msg_q->msg_free_list);
			}
		}
		if (DSP_FAILED(status)) {
			/*  Stay inside CS to prevent others from taking any
			 *  of the newly allocated message frames. */
			delete_msg_queue(msg_q, num_allocated);
		} else {
			lst_put_tail(hmsg_mgr->queue_list,
				     (struct list_head *)msg_q);
			*phMsgQueue = msg_q;
			/* Signal that free frames are now available */
			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
				sync_set_event(hmsg_mgr->sync_event);

		}
		/* Exit critical section */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	} else {
		delete_msg_queue(msg_q, 0);
	}
func_end:
	return status;
}
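A hypothetical caller; msgq_id, max_msgs, and node are assumed values. Per the early checks, *phMsgQueue is written only after the handle validation passes, so the caller should trust the return status rather than the pointer:

	struct msg_queue *msgq;
	int status;

	status = bridge_msg_create_queue(hmsg_mgr, &msgq, msgq_id,
					 max_msgs, node);
	if (status)
		return status;	/* msgq is valid only when status == 0 */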
Example 7
/*
 *  ======== dev_create_device ========
 *  Purpose:
 *      Called by the operating system to load the PM Bridge Driver for a
 *      PM board (device).
 */
int dev_create_device(struct dev_object **device_obj,
			     const char *driver_file_name,
			     struct cfg_devnode *dev_node_obj)
{
	struct cfg_hostres *host_res;
	struct ldr_module *module_obj = NULL;
	struct bridge_drv_interface *drv_fxns = NULL;
	struct dev_object *dev_obj = NULL;
	struct chnl_mgrattrs mgr_attrs;
	struct io_attrs io_mgr_attrs;
	u32 num_windows;
	struct drv_object *hdrv_obj = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	int status = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(device_obj != NULL);
	DBC_REQUIRE(driver_file_name != NULL);

	status = drv_request_bridge_res_dsp((void *)&host_res);

	if (status) {
		dev_dbg(bridge, "%s: Failed to reserve bridge resources\n",
			__func__);
		goto leave;
	}

	/*  Get the Bridge driver interface functions */
	bridge_drv_entry(&drv_fxns, driver_file_name);

	/* Retrieve the Object handle from the driver data */
	if (drv_datap && drv_datap->drv_object) {
		hdrv_obj = drv_datap->drv_object;
	} else {
		status = -EPERM;
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	/* Create the device object, and pass a handle to the Bridge driver for
	 * storage. */
	if (!status) {
		DBC_ASSERT(drv_fxns);
		dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL);
		if (dev_obj) {
			/* Fill out the rest of the Dev Object structure: */
			dev_obj->dev_node_obj = dev_node_obj;
			dev_obj->module_obj = module_obj;
			dev_obj->cod_mgr = NULL;
			dev_obj->hchnl_mgr = NULL;
			dev_obj->hdeh_mgr = NULL;
			dev_obj->lock_owner = NULL;
			dev_obj->word_size = DSPWORDSIZE;
			dev_obj->hdrv_obj = hdrv_obj;
			dev_obj->dev_type = DSP_UNIT;
			/* Store this Bridge's interface functions, based on its
			 * version. */
			store_interface_fxns(drv_fxns,
						&dev_obj->bridge_interface);

			/* Call fxn_dev_create() to get the Bridge's device
			 * context handle. */
			status = (dev_obj->bridge_interface.pfn_dev_create)
			    (&dev_obj->hbridge_context, dev_obj,
			     host_res);
			/* Assert bridge_dev_create()'s ensure clause: */
			DBC_ASSERT(status
				   || (dev_obj->hbridge_context != NULL));
		} else {
			status = -ENOMEM;
		}
	}
	/* Attempt to create the COD manager for this device: */
	if (!status)
		status = init_cod_mgr(dev_obj);

	/* Attempt to create the channel manager for this device: */
	if (!status) {
		mgr_attrs.max_channels = CHNL_MAXCHANNELS;
		io_mgr_attrs.birq = host_res->birq_registers;
		io_mgr_attrs.irq_shared =
		    (host_res->birq_attrib & CFG_IRQSHARED);
		io_mgr_attrs.word_size = DSPWORDSIZE;
		mgr_attrs.word_size = DSPWORDSIZE;
		num_windows = host_res->num_mem_windows;
		if (num_windows) {
			/* Assume last memory window is for CHNL */
			io_mgr_attrs.shm_base = host_res->dw_mem_base[1] +
			    host_res->dw_offset_for_monitor;
			io_mgr_attrs.usm_length =
			    host_res->dw_mem_length[1] -
			    host_res->dw_offset_for_monitor;
		} else {
			io_mgr_attrs.shm_base = 0;
			io_mgr_attrs.usm_length = 0;
			pr_err("%s: No memory reserved for shared structures\n",
			       __func__);
		}
		status = chnl_create(&dev_obj->hchnl_mgr, dev_obj, &mgr_attrs);
		if (status == -ENOSYS) {
			/* It's OK for a device not to have a channel
			 * manager: */
			status = 0;
		}
		/* Create CMM mgr even if Msg Mgr not impl. */
		status = cmm_create(&dev_obj->hcmm_mgr,
				    (struct dev_object *)dev_obj, NULL);
		/* Only create IO manager if we have a channel manager */
		if (!status && dev_obj->hchnl_mgr) {
			status = io_create(&dev_obj->hio_mgr, dev_obj,
					   &io_mgr_attrs);
		}
		/* Only create DEH manager if we have an IO manager */
		if (!status) {
			/* Instantiate the DEH module */
			status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
		}
		/* Create DMM mgr. */
		status = dmm_create(&dev_obj->dmm_mgr,
				    (struct dev_object *)dev_obj, NULL);
	}
	/* Add the new DEV_Object to the global list: */
	if (!status) {
		lst_init_elem(&dev_obj->link);
		status = drv_insert_dev_object(hdrv_obj, dev_obj);
	}
	/* Create the Processor List */
	if (!status) {
		dev_obj->proc_list = kzalloc(sizeof(struct lst_list),
							GFP_KERNEL);
		if (!(dev_obj->proc_list))
			status = -EPERM;
		else
			INIT_LIST_HEAD(&dev_obj->proc_list->head);
	}
leave:
	/*  If all went well, return a handle to the dev object;
	 *  else, cleanup and return NULL in the OUT parameter. */
	if (!status) {
		*device_obj = dev_obj;
	} else {
		if (dev_obj) {
			kfree(dev_obj->proc_list);
			if (dev_obj->cod_mgr)
				cod_delete(dev_obj->cod_mgr);
			if (dev_obj->dmm_mgr)
				dmm_destroy(dev_obj->dmm_mgr);
			kfree(dev_obj);
		}

		*device_obj = NULL;
	}

	DBC_ENSURE((!status && *device_obj) || (status && !*device_obj));
	return status;
}
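The DBC_ENSURE clause pins down the contract: *device_obj is non-NULL exactly when the function succeeds. A hypothetical caller can therefore branch on either the status or the handle:

	struct dev_object *dev;
	int status;

	status = dev_create_device(&dev, driver_file_name, dev_node_obj);
	if (status)
		return status;	/* dev is guaranteed NULL on failure */
	/* dev is a valid device handle from here on. */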
Example 8
/*
 *  ======== node_allocate ========
 *  Purpose:
 *      Allocate GPP resources to manage a node on the DSP.
 */
int node_allocate(struct proc_object *hprocessor,
			const struct dsp_uuid *node_uuid,
			const struct dsp_cbdata *pargs,
			const struct dsp_nodeattrin *attr_in,
			struct node_res_object **noderes,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_addr, ul_stack_seg_val;
	u32 ul_gpp_mem_base;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hprocessor != NULL);
	DBC_REQUIRE(noderes != NULL);
	DBC_REQUIRE(node_uuid != NULL);

	*noderes = NULL;

	status = proc_get_processor_id(hprocessor, &proc_id);

	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;

	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If the processor is in an error state, don't attempt to
	 * send the message. */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->ul_fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (status)
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->hnode_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->hprocessor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support node dynamic heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* Check for page-aligned heap size */
	if (attr_in->heap_size & (PG_SIZE4K - 1)) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x\n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
					task_arg_obj.udsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->utimeout = attr_in->utimeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/*  Allocate dsp_streamconnect array for device, task, and
		 *  dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;

		}
		if (!status && (node_type == NODE_TASK ||
					      node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintaining stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
						       ptask_args->strm_in_def
						       == NULL))
			    || (MAX_OUTPUTS(pnode) > 0
				&& (pnode->outputs == NULL
				    || ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
								GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/* Get the shared mem mgr for this node's dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * w/ deflt attr */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
								GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
							&pnode->msg_queue_obj,
							0,
							pnode->create_args.asa.
							node_msg_args.max_msgs,
							pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */

		status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
							   (void *)pnode,
							   &pnode->dcd_props.
							   obj_data.node_obj,
							   &pnode->
							   nldr_node_obj,
							   &pnode->phase_split);
	}

	/* Compare the value read from the node properties with STACKSEGLABEL;
	 * if it matches, read the address of STACKSEGLABEL, compute the
	 * corresponding GPP address, read the value stored at that address,
	 * and override the stack_seg value in the task args. */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					     &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj,
					     "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
			off_set = pul_value - dynext_base;
			ul_stack_seg_addr = ul_gpp_mem_base + off_set;
			ul_stack_seg_val = readl(ul_stack_seg_addr);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				ul_stack_seg_addr);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (!status) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		lst_init_elem((struct list_head *)pnode);
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		lst_put_tail(hnode_mgr->node_list, (struct list_head *)pnode);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);

	}

	if (!status) {
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
	DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}
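A hypothetical allocation call, mirroring the DBC_ENSURE contract (*noderes is non-NULL exactly on success); per the checks above, NULL is accepted for both pargs and attr_in, and the local names here are assumptions:

	struct node_res_object *node_res;
	int status;

	status = node_allocate(hprocessor, &node_uuid, NULL, NULL,
			       &node_res, pr_ctxt);
	if (status)
		return status;
	/* node_res now tracks the allocated node for this process context. */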