Example #1
/*
 *  ======== create_chirp_list ========
 *  Purpose:
 *      Initialize a queue of channel I/O Request/Completion packets.
 *  Parameters:
 *      uChirps:    Number of Chirps to allocate.
 *  Returns:
 *      Pointer to queue of IRPs, or NULL.
 *  Requires:
 *  Ensures:
 */
static struct lst_list *create_chirp_list(u32 uChirps)
{
	struct lst_list *chirp_list;
	struct chnl_irp *chnl_packet_obj;
	u32 i;

	chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);

	if (chirp_list) {
		INIT_LIST_HEAD(&chirp_list->head);
		/* Make N chirps and place on queue. */
		for (i = 0; (i < uChirps)
		     && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
			lst_put_tail(chirp_list,
				     (struct list_head *)chnl_packet_obj);
		}

		/* If we couldn't allocate all chirps, free those allocated: */
		if (i != uChirps) {
			free_chirp_list(chirp_list);
			chirp_list = NULL;
		}
	}

	return chirp_list;
}
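
The error path above calls free_chirp_list(); as a rough sketch of what that cleanup looks like (an assumption based on the lst_* API used throughout these examples, not the driver's verbatim code), it simply drains the queue and frees each packet:

/*
 *  ======== free_chirp_list ========
 *  Sketch: pop every chirp off the queue and free it, then free the
 *  list object itself. Assumes lst_get_head() returns NULL once the
 *  list is empty.
 */
static void free_chirp_list(struct lst_list *chirp_list)
{
	struct list_head *chirp;

	if (!chirp_list)
		return;

	while ((chirp = lst_get_head(chirp_list)) != NULL)
		kfree(chirp);	/* each element is a struct chnl_irp */

	kfree(chirp_list);
}
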
Example #2
File: cmm.c Project: AdiPat/i9003_Kernel
/*
 *  ======== delete_node ========
 *  Purpose:
 *      Put a memory node on the cmm nodelist for later use.
 *      Doesn't actually delete the node. Heap thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	DBC_REQUIRE(pnode != NULL);
	lst_init_elem((struct list_head *)pnode);	/* init .self ptr */
	lst_put_tail(cmm_mgr_obj->node_free_list_head,
		     (struct list_head *)pnode);
}
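
delete_node() only parks the node on the manager's free list; the matching allocation path (get_node(), used in examples #13 and #14) presumably pulls a recycled node from that list before falling back to kzalloc(). A minimal sketch under that assumption, with field names taken from the other cmm.c examples:

/*
 *  ======== get_node ========
 *  Sketch: reuse a node from the manager's node_free_list_head when
 *  one is available, otherwise allocate a fresh one, then fill in the
 *  physical address, virtual address and size. Illustrative only.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode;

	if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head))
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
	else
		pnode = (struct cmm_mnode *)
		    lst_get_head(cmm_mgr_obj->node_free_list_head);

	if (pnode) {
		lst_init_elem((struct list_head *)pnode);	/* init .self ptr */
		pnode->dw_pa = dw_pa;
		pnode->dw_va = dw_va;
		pnode->ul_size = ul_size;
	}

	return pnode;
}
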
Example #3
/*
 *  ======== bridge_chnl_cancel_io ========
 *      Return all I/O requests to the client which have not yet been
 *      transferred.  The channel's I/O completion object is
 *      signalled, and all the I/O requests are queued as IOC's, with the
 *      status field set to CHNL_IOCSTATCANCEL.
 *      This call is typically used in abort situations, and is a prelude to
 *      chnl_close();
 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	u32 chnl_id = -1;
	s8 chnl_mode;
	struct chnl_irp *chnl_packet_obj;
	struct chnl_mgr *chnl_mgr_obj = NULL;

	/* Check args: */
	if (pchnl && pchnl->chnl_mgr_obj) {
		chnl_id = pchnl->chnl_id;
		chnl_mode = pchnl->chnl_mode;
		chnl_mgr_obj = pchnl->chnl_mgr_obj;
	} else {
		status = -EFAULT;
	}
	if (DSP_FAILED(status))
		goto func_end;

	/*  Mark this channel as cancelled, to prevent further IORequests or
	 *  dispatching. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	pchnl->dw_state |= CHNL_STATECANCEL;
	if (LST_IS_EMPTY(pchnl->pio_requests))
		goto func_cont;

	if (pchnl->chnl_type == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
			io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
		}
	}
	/* Move all IOR's to IOC queue: */
	while (!LST_IS_EMPTY(pchnl->pio_requests)) {
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
		if (chnl_packet_obj) {
			chnl_packet_obj->byte_size = 0;
			chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
			lst_put_tail(pchnl->pio_completions,
				     (struct list_head *)chnl_packet_obj);
			pchnl->cio_cs++;
			pchnl->cio_reqs--;
			DBC_ASSERT(pchnl->cio_reqs >= 0);
		}
	}
func_cont:
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
func_end:
	return status;
}
Example #4
/*
 *  ======== add_new_msg ========
 *      Must be called in message manager critical section.
 */
static int add_new_msg(struct lst_list *msgList)
{
	struct msg_frame *pmsg;
	int status = 0;

	pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
	if (pmsg != NULL) {
		lst_init_elem((struct list_head *)pmsg);
		lst_put_tail(msgList, (struct list_head *)pmsg);
	} else {
		status = -ENOMEM;
	}

	return status;
}
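
The converse of add_new_msg() is draining the frame list during teardown (delete_msg_queue() in example #11 presumably does this per queue). A hypothetical helper name is used here just to illustrate the drain pattern with lst_get_head():

/*
 *  ======== free_msg_list ========
 *  Sketch: pop each msg_frame off the list and free it, then free the
 *  list object. Hypothetical helper, shown only for illustration.
 */
static void free_msg_list(struct lst_list *msg_list)
{
	struct msg_frame *pmsg;

	if (!msg_list)
		return;

	while ((pmsg = (struct msg_frame *)lst_get_head(msg_list)) != NULL)
		kfree(pmsg);

	kfree(msg_list);
}
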
Example #5
File: dev.c Project: ANFS/ANFS-kernel
/*
 *  ======== dev_insert_proc_object ========
 *  Purpose:
 *      Insert a ProcObject into the list maintained by DEV.
 *  Parameters:
 *      p_proc_object:        Ptr to ProcObject to insert.
 *      dev_obj:         Ptr to Dev Object where the list is.
 *      already_attached:     Ptr to return true if a proc object was already attached.
 *  Returns:
 *      0:           If successful.
 *  Requires:
 *      List Exists
 *      hdev_obj is Valid handle
 *      DEV Initialized
 *      already_attached != NULL
 *      proc_obj != 0
 *  Ensures:
 *      0 and List is not Empty.
 */
int dev_insert_proc_object(struct dev_object *hdev_obj,
				  u32 proc_obj, bool *already_attached)
{
	int status = 0;
	struct dev_object *dev_obj = (struct dev_object *)hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dev_obj);
	DBC_REQUIRE(proc_obj != 0);
	DBC_REQUIRE(dev_obj->proc_list != NULL);
	DBC_REQUIRE(already_attached != NULL);
	if (!LST_IS_EMPTY(dev_obj->proc_list))
		*already_attached = true;

	/* Add DevObject to tail. */
	lst_put_tail(dev_obj->proc_list, (struct list_head *)proc_obj);

	DBC_ENSURE(!status && !LST_IS_EMPTY(dev_obj->proc_list));

	return status;
}
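
The insert above has an obvious inverse: walk proc_list with lst_first()/lst_next() and unlink the matching entry with lst_remove_elem(). The following is a sketch of that traversal (the real dev.c function may differ in name and error handling):

/*
 *  ======== dev_remove_proc_object ========
 *  Sketch: find the proc object in the DEV list and unlink it.
 *  Returns 0 on success, -EPERM if the object was not found.
 *  Illustrative reconstruction, not necessarily the exact driver code.
 */
int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
{
	int status = -EPERM;
	struct list_head *cur_elem;
	struct dev_object *dev_obj = (struct dev_object *)hdev_obj;

	DBC_REQUIRE(dev_obj);
	DBC_REQUIRE(proc_obj != 0);
	DBC_REQUIRE(dev_obj->proc_list != NULL);
	DBC_REQUIRE(!LST_IS_EMPTY(dev_obj->proc_list));

	for (cur_elem = lst_first(dev_obj->proc_list); cur_elem != NULL;
	     cur_elem = lst_next(dev_obj->proc_list, cur_elem)) {
		if ((u32) cur_elem == proc_obj) {
			lst_remove_elem(dev_obj->proc_list, cur_elem);
			status = 0;
			break;
		}
	}

	return status;
}
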
Example #6
File: rmm.c Project: 3sOx/asuswrt-merlin
/*
 *  ======== rmm_alloc ========
 */
int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
		     u32 align, u32 *dsp_address, bool reserve)
{
	struct rmm_ovly_sect *sect;
	struct rmm_ovly_sect *prev_sect = NULL;
	struct rmm_ovly_sect *new_sect;
	u32 addr;
	int status = 0;

	DBC_REQUIRE(target);
	DBC_REQUIRE(dsp_address != NULL);
	DBC_REQUIRE(size > 0);
	DBC_REQUIRE(reserve || (target->num_segs > 0));
	DBC_REQUIRE(refs > 0);

	if (!reserve) {
		if (!alloc_block(target, segid, size, align, dsp_address)) {
			status = -ENOMEM;
		} else {
			/* Increment the number of allocated blocks in this
			 * segment */
			target->seg_tab[segid].number++;
		}
		goto func_end;
	}
	/* An overlay section - See if block is already in use. If not,
	 * insert into the list in ascending address order. */
	addr = *dsp_address;
	sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
	/*  Find place to insert new list element. List is sorted from
	 *  smallest to largest address. */
	while (sect != NULL) {
		if (addr <= sect->addr) {
			/* Check for overlap with sect */
			if ((addr + size > sect->addr) || (prev_sect &&
							   (prev_sect->addr +
							    prev_sect->size >
							    addr))) {
				status = -ENXIO;
			}
			break;
		}
		prev_sect = sect;
		sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
							(struct list_head *)
							sect);
	}
	if (!status) {
		/* No overlap - allocate list element for new section. */
		new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
		if (new_sect == NULL) {
			status = -ENOMEM;
		} else {
			lst_init_elem((struct list_head *)new_sect);
			new_sect->addr = addr;
			new_sect->size = size;
			new_sect->page = segid;
			if (sect == NULL) {
				/* Put new section at the end of the list */
				lst_put_tail(target->ovly_list,
					     (struct list_head *)new_sect);
			} else {
				/* Put new section just before sect */
				lst_insert_before(target->ovly_list,
						  (struct list_head *)new_sect,
						  (struct list_head *)sect);
			}
		}
	}
func_end:
	return status;
}
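
For the overlay path, freeing is the mirror image of the sorted insert above: locate the section by its DSP address in ovly_list, unlink it and free it. A simplified sketch (hypothetical helper name; the real rmm_free() also covers the non-reserved, heap-style segments):

/*
 *  ======== rmm_free_ovly_sketch ========
 *  Sketch: remove an overlay section from the sorted list. Hypothetical
 *  name, shown only to illustrate the lst_first()/lst_next()/
 *  lst_remove_elem() traversal.
 */
static bool rmm_free_ovly_sketch(struct rmm_target_obj *target, u32 dsp_addr,
				 u32 size)
{
	struct rmm_ovly_sect *sect;

	sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
	while (sect != NULL) {
		if (dsp_addr == sect->addr) {
			DBC_ASSERT(size == sect->size);
			lst_remove_elem(target->ovly_list,
					(struct list_head *)sect);
			kfree(sect);
			return true;
		}
		sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
							(struct list_head *)
							sect);
	}

	return false;
}
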
Example #7
/*
 *  ======== bridge_chnl_add_io_req ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *pHostBuf,
			       u32 byte_size, u32 buf_size,
			       OPTIONAL u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args:  */
	if (pHostBuf == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
		status = -EPERM;
	} else {
		/* Check the channel state: only queue chirp if channel state
		 * allows */
		dw_state = pchnl->dw_state;
		if (dw_state != CHNL_STATEREADY) {
			if (dw_state & CHNL_STATECANCEL)
				status = -ECANCELED;
			else if ((dw_state & CHNL_STATEEOS)
				 && CHNL_IS_OUTPUT(pchnl->chnl_mode))
				status = -EPIPE;
			else
				/* No other possible states left: */
				DBC_ASSERT(0);
		}
	}

	if (DSP_FAILED(status))
		goto func_end;

	chnl_mgr_obj = pchnl->chnl_mgr_obj;

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt || !chnl_mgr_obj)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && pHostBuf) {
		if (!(pHostBuf < (void *)USERMODE_ADDR)) {
			host_sys_buf = pHostBuf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, pHostBuf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				status = -EFAULT;
				goto func_end;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size >
			    io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
				status = -EINVAL;

		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Get a free chirp: */
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
		if (chnl_packet_obj == NULL)
			status = -EIO;

	}
	if (DSP_SUCCEEDED(status)) {
		/* Enqueue the chirp on the chnl's IORequest queue: */
		chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		    pHostBuf;
		if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
			chnl_packet_obj->host_sys_buf = host_sys_buf;

		/*
		 * Note: for dma chans dw_dsp_addr contains dsp address
		 * of SM buffer.
		 */
		DBC_ASSERT(chnl_mgr_obj->word_size != 0);
		/* DSP address */
		chnl_packet_obj->dsp_tx_addr =
		    dw_dsp_addr / chnl_mgr_obj->word_size;
		chnl_packet_obj->byte_size = byte_size;
		chnl_packet_obj->buf_size = buf_size;
		/* Only valid for output channel */
		chnl_packet_obj->dw_arg = dw_arg;
		chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
					   CHNL_IOCSTATCOMPLETE);
		lst_put_tail(pchnl->pio_requests,
			     (struct list_head *)chnl_packet_obj);
		pchnl->cio_reqs++;
		DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
		/* If end of stream, update the channel state to prevent
		 * more IOR's: */
		if (is_eos)
			pchnl->dw_state |= CHNL_STATEEOS;

		/* Legacy DSM Processor-Copy */
		DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
		/* Request IO from the DSP */
		io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
				(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
				 IO_OUTPUT), &mb_val);
		sched_dpc = true;

	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		io_intr_dsp2(chnl_mgr_obj->hio_mgr, mb_val);

	/* Schedule a DPC, to do the actual data transfer: */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->hio_mgr);

func_end:
	return status;
}
Example #8
/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
			    OUT struct chnl_ioc *pIOC)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (pIOC == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;

	}

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (dwTimeOut !=
	    CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, dwTimeOut);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set pIOC; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update pIOC from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/*  If this is a zero-copy channel, then set IOC's pbuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later. */
			{
				host_sys_buf = chnl_packet_obj->host_sys_buf;
				ioc.pbuf = chnl_packet_obj->host_user_buf;
			}
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  bridge_chnl_get_ioc. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      sync_set_event(pchnl->sync_event);
		 *  } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/* host_user_buf */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	return status;
}
Example #9
/*
 *  ======== bridge_msg_put ========
 *      Put a message onto a msg_ctrl queue.
 */
int bridge_msg_put(struct msg_queue *msg_queue_obj,
			  IN CONST struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	bool put_msg = false;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
		status = -ENOMEM;
		goto func_end;
	}
	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	if (!hmsg_mgr->msg_free_list) {
		status = -EFAULT;
		goto func_end;
	}

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);

	/* If a message frame is available, use it */
	if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
		msg_frame_obj =
		    (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
		if (msg_frame_obj != NULL) {
			msg_frame_obj->msg_data.msg = *pmsg;
			msg_frame_obj->msg_data.msgq_id =
			    msg_queue_obj->msgq_id;
			lst_put_tail(hmsg_mgr->msg_used_list,
				     (struct list_head *)msg_frame_obj);
			hmsg_mgr->msgs_pending++;
			put_msg = true;
		}
		if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
			sync_reset_event(hmsg_mgr->sync_event);

		/* Release critical section before scheduling DPC */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Schedule a DPC, to do the actual data transfer: */
		iosm_schedule(hmsg_mgr->hio_mgr);
	} else {
		if (msg_queue_obj->done)
			status = -EPERM;
		else
			msg_queue_obj->io_msg_pend++;

		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	}
	if (DSP_SUCCEEDED(status) && !put_msg) {
		/* Wait til a free message frame is available, timeout,
		 * or done */
		syncs[0] = hmsg_mgr->sync_event;
		syncs[1] = msg_queue_obj->sync_done;
		status = sync_wait_on_multiple_events(syncs, 2, utimeout,
						      &index);
		if (DSP_FAILED(status))
			goto func_end;
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		if (msg_queue_obj->done) {
			msg_queue_obj->io_msg_pend--;
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
			/*  Signal that we're not going to access msg_queue_obj
			 *  anymore, so it can be deleted. */
			(void)sync_set_event(msg_queue_obj->sync_done_ack);
			status = -EPERM;
		} else {
			if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
				status = -EFAULT;
				goto func_cont;
			}
			/* Get msg from free list */
			msg_frame_obj = (struct msg_frame *)
			    lst_get_head(hmsg_mgr->msg_free_list);
			/*
			 * Copy message into pmsg and put frame on the
			 * used list.
			 */
			if (msg_frame_obj) {
				msg_frame_obj->msg_data.msg = *pmsg;
				msg_frame_obj->msg_data.msgq_id =
				    msg_queue_obj->msgq_id;
				lst_put_tail(hmsg_mgr->msg_used_list,
					     (struct list_head *)msg_frame_obj);
				hmsg_mgr->msgs_pending++;
				/*
				 * Schedule a DPC, to do the actual
				 * data transfer.
				 */
				iosm_schedule(hmsg_mgr->hio_mgr);
			}

			msg_queue_obj->io_msg_pend--;
			/* Signal the event if there are still free frames available */
			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
				sync_set_event(hmsg_mgr->sync_event);
func_cont:
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		}
	}
func_end:
	return status;
}
Example #10
/*
 *  ======== bridge_msg_get ========
 *      Get a message from a msg_ctrl queue.
 */
int bridge_msg_get(struct msg_queue *msg_queue_obj,
			  struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	bool got_msg = false;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || pmsg == NULL) {
		status = -ENOMEM;
		goto func_end;
	}

	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	if (!msg_queue_obj->msg_used_list) {
		status = -EFAULT;
		goto func_end;
	}

	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* If a message is already there, get it */
	if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
		msg_frame_obj = (struct msg_frame *)
		    lst_get_head(msg_queue_obj->msg_used_list);
		if (msg_frame_obj != NULL) {
			*pmsg = msg_frame_obj->msg_data.msg;
			lst_put_tail(msg_queue_obj->msg_free_list,
				     (struct list_head *)msg_frame_obj);
			if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
				sync_reset_event(msg_queue_obj->sync_event);
			else {
				ntfy_notify(msg_queue_obj->ntfy_obj,
					    DSP_NODEMESSAGEREADY);
				sync_set_event(msg_queue_obj->sync_event);
			}

			got_msg = true;
		}
	} else {
		if (msg_queue_obj->done)
			status = -EPERM;
		else
			msg_queue_obj->io_msg_pend++;

	}
	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	if (DSP_SUCCEEDED(status) && !got_msg) {
		/*  Wait til message is available, timeout, or done. We don't
		 *  have to schedule the DPC, since the DSP will send messages
		 *  when they are available. */
		syncs[0] = msg_queue_obj->sync_event;
		syncs[1] = msg_queue_obj->sync_done;
		status = sync_wait_on_multiple_events(syncs, 2, utimeout,
						      &index);
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		if (msg_queue_obj->done) {
			msg_queue_obj->io_msg_pend--;
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
			/*  Signal that we're not going to access msg_queue_obj
			 *  anymore, so it can be deleted. */
			(void)sync_set_event(msg_queue_obj->sync_done_ack);
			status = -EPERM;
		} else {
			if (DSP_SUCCEEDED(status)) {
				DBC_ASSERT(!LST_IS_EMPTY
					   (msg_queue_obj->msg_used_list));
				/* Get msg from used list */
				msg_frame_obj = (struct msg_frame *)
				    lst_get_head(msg_queue_obj->msg_used_list);
				/* Copy message into pmsg and put frame on the
				 * free list */
				if (msg_frame_obj != NULL) {
					*pmsg = msg_frame_obj->msg_data.msg;
					lst_put_tail
					    (msg_queue_obj->msg_free_list,
					     (struct list_head *)
					     msg_frame_obj);
				}
			}
			msg_queue_obj->io_msg_pend--;
			/* Signal the event if there are still queued messages */
			if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
				ntfy_notify(msg_queue_obj->ntfy_obj,
					    DSP_NODEMESSAGEREADY);
				sync_set_event(msg_queue_obj->sync_event);
			}
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		}
	}
func_end:
	return status;
}
Example #11
/*
 *  ======== bridge_msg_create_queue ========
 *      Create a msg_queue for sending/receiving messages to/from a node
 *      on the DSP.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
				OUT struct msg_queue **phMsgQueue,
				u32 msgq_id, u32 max_msgs, bhandle arg)
{
	u32 i;
	u32 num_allocated = 0;
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || phMsgQueue == NULL || !hmsg_mgr->msg_free_list) {
		status = -EFAULT;
		goto func_end;
	}

	*phMsgQueue = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q) {
		status = -ENOMEM;
		goto func_end;
	}
	lst_init_elem((struct list_head *)msg_q);
	msg_q->max_msgs = max_msgs;
	msg_q->hmsg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of Message frames for messages from the DSP */
	msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
		status = -ENOMEM;
	else {
		INIT_LIST_HEAD(&msg_q->msg_free_list->head);
		INIT_LIST_HEAD(&msg_q->msg_used_list->head);
	}

	/*  Create event that will be signalled when a message from
	 *  the DSP is available. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_event = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_event)
			sync_init_event(msg_q->sync_event);
		else
			status = -ENOMEM;
	}

	/* Create a notification list for message ready notification. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (msg_q->ntfy_obj)
			ntfy_init(msg_q->ntfy_obj);
		else
			status = -ENOMEM;
	}

	/*  Create events that will be used to synchronize cleanup
	 *  when the object is deleted. sync_done will be set to
	 *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
	 *  will be set by the unblocked thread to signal that it
	 *  is unblocked and will no longer reference the object. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_done)
			sync_init_event(msg_q->sync_done);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_done_ack)
			sync_init_event(msg_q->sync_done_ack);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Initialize message frames and put in appropriate queues */
		for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) {
			status = add_new_msg(hmsg_mgr->msg_free_list);
			if (DSP_SUCCEEDED(status)) {
				num_allocated++;
				status = add_new_msg(msg_q->msg_free_list);
			}
		}
		if (DSP_FAILED(status)) {
			/*  Stay inside CS to prevent others from taking any
			 *  of the newly allocated message frames. */
			delete_msg_queue(msg_q, num_allocated);
		} else {
			lst_put_tail(hmsg_mgr->queue_list,
				     (struct list_head *)msg_q);
			*phMsgQueue = msg_q;
			/* Signal that free frames are now available */
			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
				sync_set_event(hmsg_mgr->sync_event);

		}
		/* Exit critical section */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	} else {
		delete_msg_queue(msg_q, 0);
	}
func_end:
	return status;
}
Example #12
File: cmm.c Project: AdiPat/i9003_Kernel
/*
 *  ======== add_to_free_list ========
 *  Purpose:
 *      Coalesce node into the freelist in ascending size order.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode)
{
	struct cmm_mnode *node_prev = NULL;
	struct cmm_mnode *node_next = NULL;
	struct cmm_mnode *mnode_obj;
	u32 dw_this_pa;
	u32 dw_next_pa;

	DBC_REQUIRE(pnode != NULL);
	DBC_REQUIRE(allocator != NULL);
	dw_this_pa = pnode->dw_pa;
	dw_next_pa = NEXT_PA(pnode);
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (dw_this_pa == NEXT_PA(mnode_obj)) {
			/* found the block ahead of this one */
			node_prev = mnode_obj;
		} else if (dw_next_pa == mnode_obj->dw_pa) {
			node_next = mnode_obj;
		}
		if ((node_prev == NULL) || (node_next == NULL)) {
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->free_list_head,
				     (struct list_head *)mnode_obj);
		} else {
			/* got 'em */
			break;
		}
	}			/* while */
	if (node_prev != NULL) {
		/* combine with previous block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_prev);
		/* grow node to hold both */
		pnode->ul_size += node_prev->ul_size;
		pnode->dw_pa = node_prev->dw_pa;
		pnode->dw_va = node_prev->dw_va;
		/* place node on mgr nodeFreeList */
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_prev);
	}
	if (node_next != NULL) {
		/* combine with next block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_next);
		/* grow da node */
		pnode->ul_size += node_next->ul_size;
		/* place node on mgr nodeFreeList */
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_next);
	}
	/* Now, let's add to freelist in increasing size order */
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (pnode->ul_size <= mnode_obj->ul_size)
			break;

		/* next node. */
		mnode_obj =
		    (struct cmm_mnode *)lst_next(allocator->free_list_head,
						 (struct list_head *)mnode_obj);
	}
	/* if mnode_obj is NULL then add our pnode to the end of the freelist */
	if (mnode_obj == NULL) {
		lst_put_tail(allocator->free_list_head,
			     (struct list_head *)pnode);
	} else {
		/* insert our node before the current traversed node */
		lst_insert_before(allocator->free_list_head,
				  (struct list_head *)pnode,
				  (struct list_head *)mnode_obj);
	}
}
Example #13
File: cmm.c Project: AdiPat/i9003_Kernel
/*
 *  ======== cmm_register_gppsm_seg ========
 *  Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
				  u32 dw_gpp_base_pa, u32 ul_size,
				  u32 dwDSPAddrOffset, s8 c_factor,
				  u32 dw_dsp_base, u32 ul_dsp_size,
				  u32 *pulSegId, u32 dw_gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	DBC_REQUIRE(ul_size > 0);
	DBC_REQUIRE(pulSegId != NULL);
	DBC_REQUIRE(dw_gpp_base_pa != 0);
	DBC_REQUIRE(dw_gpp_base_va != 0);
	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
		    (c_factor >= CMM_SUBFROMDSPPA));
	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dwDSPAddrOffset %x "
		"dw_dsp_base %x ul_dsp_size %x dw_gpp_base_va %x\n", __func__,
		dw_gpp_base_pa, ul_size, dwDSPAddrOffset, dw_dsp_base,
		ul_dsp_size, dw_gpp_base_va);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		/* get a slot number */
		status = -EPERM;
		goto func_end;
	}
	/* Check if input ul_size is big enough to alloc at least one block */
	if (DSP_SUCCEEDED(status)) {
		if (ul_size < cmm_mgr_obj->ul_min_block_size) {
			status = -EINVAL;
			goto func_end;
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* create, zero, and tag an SM allocator object */
		psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	}
	if (psma != NULL) {
		psma->hcmm_mgr = hcmm_mgr;	/* ref to parent */
		psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
		psma->ul_sm_size = ul_size;	/* SM segment size in bytes */
		psma->dw_vm_base = dw_gpp_base_va;
		psma->dw_dsp_phys_addr_offset = dwDSPAddrOffset;
		psma->c_factor = c_factor;
		psma->dw_dsp_base = dw_dsp_base;
		psma->ul_dsp_size = ul_dsp_size;
		if (psma->dw_vm_base == 0) {
			status = -EPERM;
			goto func_end;
		}
		if (DSP_SUCCEEDED(status)) {
			/* return the actual segment identifier */
			*pulSegId = (u32) slot_seg + 1;
			/* create memory free list */
			psma->free_list_head = kzalloc(sizeof(struct lst_list),
								GFP_KERNEL);
			if (psma->free_list_head == NULL) {
				status = -ENOMEM;
				goto func_end;
			}
			INIT_LIST_HEAD(&psma->free_list_head->head);
		}
		if (DSP_SUCCEEDED(status)) {
			/* create memory in-use list */
			psma->in_use_list_head = kzalloc(sizeof(struct
							lst_list), GFP_KERNEL);
			if (psma->in_use_list_head == NULL) {
				status = -ENOMEM;
				goto func_end;
			}
			INIT_LIST_HEAD(&psma->in_use_list_head->head);
		}
		if (DSP_SUCCEEDED(status)) {
			/* Get a mem node for this hunk-o-memory */
			new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
					    psma->dw_vm_base, ul_size);
			/* Place node on the SM allocator's free list */
			if (new_node) {
				lst_put_tail(psma->free_list_head,
					     (struct list_head *)new_node);
			} else {
				status = -ENOMEM;
				goto func_end;
			}
		}
		if (DSP_FAILED(status)) {
			/* Cleanup allocator */
			un_register_gppsm_seg(psma);
		}
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry */
	if (DSP_SUCCEEDED(status))
		cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}
Example #14
File: cmm.c Project: AdiPat/i9003_Kernel
/*
 *  ======== cmm_calloc_buf ========
 *  Purpose:
 *      Allocate a SM buffer, zero contents, and return the physical address
 *      and optional driver context virtual address(pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request and put the remainder back on
 *      the freelist if it is large enough. The kept block is placed on
 *      the inUseList.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, OUT void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte = NULL;
	s32 cnt;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	if (cmm_mgr_obj && (usize != 0)) {
		if (pattrs->ul_seg_id > 0) {
			/* SegId > 0 is SM */
			/* get the allocator object for this segment id */
			allocator =
			    get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
			/* keep block size a multiple of ul_min_block_size */
			usize =
			    ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size -
					     1))
			    + cmm_mgr_obj->ul_min_block_size;
			mutex_lock(&cmm_mgr_obj->cmm_lock);
			pnode = get_free_block(allocator, usize);
		}
		if (pnode) {
			delta_size = (pnode->ul_size - usize);
			if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
				/* create a new block with the leftovers and
				 * add to freelist */
				new_node =
				    get_node(cmm_mgr_obj, pnode->dw_pa + usize,
					     pnode->dw_va + usize,
					     (u32) delta_size);
				if (new_node) {
					/* leftovers go free */
					add_to_free_list(allocator, new_node);
				}
				/* adjust our node's size */
				pnode->ul_size = usize;
			}
			/* Tag node with client process requesting allocation
			 * We'll need to free up a process's alloc'd SM if the
			 * client process goes away.
			 */
			/* Return TGID instead of process handle */
			pnode->client_proc = current->tgid;

			/* put our node on InUse list */
			lst_put_tail(allocator->in_use_list_head,
				     (struct list_head *)pnode);
			buf_pa = (void *)pnode->dw_pa;	/* physical address */
			/* clear mem */
			pbyte = (u8 *) pnode->dw_va;
			for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
				*pbyte = 0;

			if (pp_buf_va != NULL) {
				/* Virtual address */
				*pp_buf_va = (void *)pnode->dw_va;
			}
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return buf_pa;
}
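
cmm_calloc_buf() relies on get_free_block(), and because add_to_free_list() (example #12) keeps the freelist in ascending size order, the first node large enough is the best fit. A sketch of that lookup under those assumptions:

/*
 *  ======== get_free_block ========
 *  Sketch: scan the size-ordered freelist, unlink and return the first
 *  node that can hold usize bytes, or NULL if none fits. Illustrative
 *  reconstruction based on the other cmm.c examples.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	struct cmm_mnode *mnode_obj;

	if (!allocator)
		return NULL;

	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (usize <= mnode_obj->ul_size) {
			lst_remove_elem(allocator->free_list_head,
					(struct list_head *)mnode_obj);
			return mnode_obj;
		}
		mnode_obj = (struct cmm_mnode *)
		    lst_next(allocator->free_list_head,
			     (struct list_head *)mnode_obj);
	}

	return NULL;
}
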
Example #15
/*
 *  ======== node_allocate ========
 *  Purpose:
 *      Allocate GPP resources to manage a node on the DSP.
 */
int node_allocate(struct proc_object *hprocessor,
			const struct dsp_uuid *node_uuid,
			const struct dsp_cbdata *pargs,
			const struct dsp_nodeattrin *attr_in,
			struct node_res_object **noderes,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_addr, ul_stack_seg_val;
	u32 ul_gpp_mem_base;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hprocessor != NULL);
	DBC_REQUIRE(noderes != NULL);
	DBC_REQUIRE(node_uuid != NULL);

	*noderes = NULL;

	status = proc_get_processor_id(hprocessor, &proc_id);

	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;

	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->ul_fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (status)
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->hnode_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->hprocessor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support Node Dynamic heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* check for page aligned Heap size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
					task_arg_obj.udsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->utimeout = attr_in->utimeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/*  Allocate dsp_streamconnect array for device, task, and
		 *  dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;

		}
		if (!status && (node_type == NODE_TASK ||
					      node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintaining stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
						       ptask_args->strm_in_def
						       == NULL))
			    || (MAX_OUTPUTS(pnode) > 0
				&& (pnode->outputs == NULL
				    || ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
								GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/* Get the shared mem mgr for this node's dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * w/ deflt attr */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
								GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
							&pnode->msg_queue_obj,
							0,
							pnode->create_args.asa.
							node_msg_args.max_msgs,
							pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */

		status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
							   (void *)pnode,
							   &pnode->dcd_props.
							   obj_data.node_obj,
							   &pnode->
							   nldr_node_obj,
							   &pnode->phase_split);
	}

	/* Compare value read from Node Properties and check if it is same as
	 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
	 * GPP Address, Read the value in that address and override the
	 * stack_seg value in task args */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					     &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj,
					     "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
			off_set = pul_value - dynext_base;
			ul_stack_seg_addr = ul_gpp_mem_base + off_set;
			ul_stack_seg_val = readl(ul_stack_seg_addr);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				ul_stack_seg_addr);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (!status) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		lst_init_elem((struct list_head *)pnode);
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);

	}

	if (!status) {
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
	DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}