Example #1
/*
 *  ======== bridge_msg_delete_queue ========
 *      Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
 */
void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
{
	struct msg_mgr *hmsg_mgr;
	u32 io_msg_pend;

	if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
		goto func_end;

	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	msg_queue_obj->done = true;
	/*  Unblock all threads blocked in MSG_Get() or MSG_Put(). */
	io_msg_pend = msg_queue_obj->io_msg_pend;
	while (io_msg_pend) {
		/* Unblock thread */
		sync_set_event(msg_queue_obj->sync_done);
		/* Wait for acknowledgement */
		sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
		io_msg_pend = msg_queue_obj->io_msg_pend;
	}
	/* Remove message queue from hmsg_mgr->queue_list */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	lst_remove_elem(hmsg_mgr->queue_list,
			(struct list_head *)msg_queue_obj);
	/* Free the message queue object */
	delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
	if (!hmsg_mgr->msg_free_list)
		goto func_cont;
	if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
		sync_reset_event(hmsg_mgr->sync_event);
func_cont:
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
func_end:
	return;
}
Example #2
/*
 *  ======== bridge_msg_delete_queue ========
 *      Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
 */
void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
{
	struct msg_mgr *hmsg_mgr;
	u32 io_msg_pend;

	if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
		return;

	hmsg_mgr = msg_queue_obj->msg_mgr;
	msg_queue_obj->done = true;
	/*  Unblock all threads blocked in MSG_Get() or MSG_Put(). */
	io_msg_pend = msg_queue_obj->io_msg_pend;
	while (io_msg_pend) {
		/* Unblock thread */
		sync_set_event(msg_queue_obj->sync_done);
		/* Wait for acknowledgement */
		sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
		io_msg_pend = msg_queue_obj->io_msg_pend;
	}
	/* Remove message queue from hmsg_mgr->queue_list */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	list_del(&msg_queue_obj->list_elem);
	/* Free the message queue object */
	delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
	if (list_empty(&hmsg_mgr->msg_free_list))
		sync_reset_event(hmsg_mgr->sync_event);
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
}
Example #3
/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
			    OUT struct chnl_ioc *pIOC)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (pIOC == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;

	}

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (dwTimeOut !=
	    CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, dwTimeOut);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set pIOC; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update pIOC from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/*  If this is a zero-copy channel, then set IOC's pbuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later. */
			{
				host_sys_buf = chnl_packet_obj->host_sys_buf;
				ioc.pbuf = chnl_packet_obj->host_user_buf;
			}
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOCs are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  bridge_chnl_get_ioc. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      sync_set_event(pchnl->sync_event);
		 *  } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/*host_user_buf */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	return status;
}
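
A caller of bridge_chnl_get_ioc() either polls with CHNL_IOCNOWAIT or blocks with a finite timeout and then inspects the returned chnl_ioc. The fragment below is a minimal, hypothetical caller sketch, not taken from the driver: it assumes the tidspbridge channel declarations are in scope, that chnl is a valid struct chnl_object * obtained elsewhere, and that the 5000 ms timeout is arbitrary.

/* Hypothetical caller sketch (illustration only): poll first, then block. */
static int fetch_one_completion(struct chnl_object *chnl)
{
	struct chnl_ioc ioc;
	int status;

	/* Non-blocking probe; -EREMOTEIO means the completion list was empty. */
	status = bridge_chnl_get_ioc(chnl, CHNL_IOCNOWAIT, &ioc);
	if (status == -EREMOTEIO)
		/* Nothing queued yet: block for up to 5000 ms (arbitrary). */
		status = bridge_chnl_get_ioc(chnl, 5000, &ioc);
	if (status)
		return status;

	if (ioc.status & CHNL_IOCSTATTIMEOUT)
		return -ETIMEDOUT;	/* the DSP did not respond in time */

	/* ioc.pbuf and ioc.byte_size now describe the completed request. */
	return 0;
}
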
Example #4
/*
 *  ======== bridge_msg_put ========
 *      Put a message onto a msg_ctrl queue.
 */
int bridge_msg_put(struct msg_queue *msg_queue_obj,
			  IN CONST struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	bool put_msg = false;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
		status = -ENOMEM;
		goto func_end;
	}
	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	if (!hmsg_mgr->msg_free_list) {
		status = -EFAULT;
		goto func_end;
	}

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);

	/* If a message frame is available, use it */
	if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
		msg_frame_obj =
		    (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
		if (msg_frame_obj != NULL) {
			msg_frame_obj->msg_data.msg = *pmsg;
			msg_frame_obj->msg_data.msgq_id =
			    msg_queue_obj->msgq_id;
			lst_put_tail(hmsg_mgr->msg_used_list,
				     (struct list_head *)msg_frame_obj);
			hmsg_mgr->msgs_pending++;
			put_msg = true;
		}
		if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
			sync_reset_event(hmsg_mgr->sync_event);

		/* Release critical section before scheduling DPC */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Schedule a DPC, to do the actual data transfer: */
		iosm_schedule(hmsg_mgr->hio_mgr);
	} else {
		if (msg_queue_obj->done)
			status = -EPERM;
		else
			msg_queue_obj->io_msg_pend++;

		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	}
	if (DSP_SUCCEEDED(status) && !put_msg) {
		/* Wait until a free message frame is available, a timeout
		 * occurs, or the queue is marked done */
		syncs[0] = hmsg_mgr->sync_event;
		syncs[1] = msg_queue_obj->sync_done;
		status = sync_wait_on_multiple_events(syncs, 2, utimeout,
						      &index);
		if (DSP_FAILED(status))
			goto func_end;
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		if (msg_queue_obj->done) {
			msg_queue_obj->io_msg_pend--;
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
			/*  Signal that we're not going to access msg_queue_obj
			 *  anymore, so it can be deleted. */
			(void)sync_set_event(msg_queue_obj->sync_done_ack);
			status = -EPERM;
		} else {
			if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
				status = -EFAULT;
				goto func_cont;
			}
			/* Get msg from free list */
			msg_frame_obj = (struct msg_frame *)
			    lst_get_head(hmsg_mgr->msg_free_list);
			/*
			 * Copy message into pmsg and put frame on the
			 * used list.
			 */
			if (msg_frame_obj) {
				msg_frame_obj->msg_data.msg = *pmsg;
				msg_frame_obj->msg_data.msgq_id =
				    msg_queue_obj->msgq_id;
				lst_put_tail(hmsg_mgr->msg_used_list,
					     (struct list_head *)msg_frame_obj);
				hmsg_mgr->msgs_pending++;
				/*
				 * Schedule a DPC, to do the actual
				 * data transfer.
				 */
				iosm_schedule(hmsg_mgr->hio_mgr);
			}

			msg_queue_obj->io_msg_pend--;
			/* Reset event if there are still frames available */
			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
				sync_set_event(hmsg_mgr->sync_event);
func_cont:
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		}
	}
func_end:
	return status;
}
Example #5
/*
 *  ======== bridge_msg_get ========
 *      Get a message from a msg_ctrl queue.
 */
int bridge_msg_get(struct msg_queue *msg_queue_obj,
			  struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	bool got_msg = false;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || pmsg == NULL) {
		status = -ENOMEM;
		goto func_end;
	}

	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	if (!msg_queue_obj->msg_used_list) {
		status = -EFAULT;
		goto func_end;
	}

	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* If a message is already there, get it */
	if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
		msg_frame_obj = (struct msg_frame *)
		    lst_get_head(msg_queue_obj->msg_used_list);
		if (msg_frame_obj != NULL) {
			*pmsg = msg_frame_obj->msg_data.msg;
			lst_put_tail(msg_queue_obj->msg_free_list,
				     (struct list_head *)msg_frame_obj);
			if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
				sync_reset_event(msg_queue_obj->sync_event);
			else {
				ntfy_notify(msg_queue_obj->ntfy_obj,
					    DSP_NODEMESSAGEREADY);
				sync_set_event(msg_queue_obj->sync_event);
			}

			got_msg = true;
		}
	} else {
		if (msg_queue_obj->done)
			status = -EPERM;
		else
			msg_queue_obj->io_msg_pend++;

	}
	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	if (DSP_SUCCEEDED(status) && !got_msg) {
		/*  Wait until a message is available, timeout, or done. We don't
		 *  have to schedule the DPC, since the DSP will send messages
		 *  when they are available. */
		syncs[0] = msg_queue_obj->sync_event;
		syncs[1] = msg_queue_obj->sync_done;
		status = sync_wait_on_multiple_events(syncs, 2, utimeout,
						      &index);
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		if (msg_queue_obj->done) {
			msg_queue_obj->io_msg_pend--;
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
			/*  Signal that we're not going to access msg_queue_obj
			 *  anymore, so it can be deleted. */
			(void)sync_set_event(msg_queue_obj->sync_done_ack);
			status = -EPERM;
		} else {
			if (DSP_SUCCEEDED(status)) {
				DBC_ASSERT(!LST_IS_EMPTY
					   (msg_queue_obj->msg_used_list));
				/* Get msg from used list */
				msg_frame_obj = (struct msg_frame *)
				    lst_get_head(msg_queue_obj->msg_used_list);
				/* Copy message into pmsg and put frame on the
				 * free list */
				if (msg_frame_obj != NULL) {
					*pmsg = msg_frame_obj->msg_data.msg;
					lst_put_tail
					    (msg_queue_obj->msg_free_list,
					     (struct list_head *)
					     msg_frame_obj);
				}
			}
			msg_queue_obj->io_msg_pend--;
			/* Reset the event if there are still queued messages */
			if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
				ntfy_notify(msg_queue_obj->ntfy_obj,
					    DSP_NODEMESSAGEREADY);
				sync_set_event(msg_queue_obj->sync_event);
			}
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		}
	}
func_end:
	return status;
}
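
Examples #1/#2 on one side and Examples #4/#5 on the other implement a small teardown handshake: the deleter sets done, then keeps signalling sync_done and waiting on sync_done_ack until io_msg_pend reaches zero, while each unblocked caller decrements io_msg_pend and sets sync_done_ack before returning -EPERM. The following is a simplified, self-contained user-space model of that protocol using POSIX threads instead of the driver's sync layer; all names (demo_queue, demo_wait, demo_delete) are hypothetical.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical user-space model of the done/sync_done/sync_done_ack
 * handshake; the driver's sync events are replaced by one mutex and
 * two condition variables. Illustration only. */
struct demo_queue {
	pthread_mutex_t lock;		/* plays the role of msg_mgr_lock */
	pthread_cond_t done_cv;		/* plays the role of sync_done */
	pthread_cond_t ack_cv;		/* plays the role of sync_done_ack */
	bool done;
	unsigned int io_msg_pend;
};

/* Blocked caller side, cf. bridge_msg_get()/bridge_msg_put(). */
static int demo_wait(struct demo_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->io_msg_pend++;
	while (!q->done)		/* a real caller also waits for data */
		pthread_cond_wait(&q->done_cv, &q->lock);
	q->io_msg_pend--;
	pthread_cond_signal(&q->ack_cv); /* "I will not touch q any more" */
	pthread_mutex_unlock(&q->lock);
	return -EPERM;			/* as in the driver's done path */
}

/* Deleter side, cf. bridge_msg_delete_queue(). */
static void demo_delete(struct demo_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->done = true;
	while (q->io_msg_pend) {
		pthread_cond_broadcast(&q->done_cv);	 /* unblock waiters */
		pthread_cond_wait(&q->ack_cv, &q->lock); /* wait for an ack */
	}
	pthread_mutex_unlock(&q->lock);
	/* q is no longer referenced by any waiter and may be freed. */
}
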
Example #6
/*
 *  ======== bridge_msg_create_queue ========
 *      Create a msg_queue for sending/receiving messages to/from a node
 *      on the DSP.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
				OUT struct msg_queue **phMsgQueue,
				u32 msgq_id, u32 max_msgs, bhandle arg)
{
	u32 i;
	u32 num_allocated = 0;
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || phMsgQueue == NULL || !hmsg_mgr->msg_free_list) {
		status = -EFAULT;
		goto func_end;
	}

	*phMsgQueue = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q) {
		status = -ENOMEM;
		goto func_end;
	}
	lst_init_elem((struct list_head *)msg_q);
	msg_q->max_msgs = max_msgs;
	msg_q->hmsg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of Message frames for messages from the DSP */
	msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
		status = -ENOMEM;
	else {
		INIT_LIST_HEAD(&msg_q->msg_free_list->head);
		INIT_LIST_HEAD(&msg_q->msg_used_list->head);
	}

	/*  Create event that will be signalled when a message from
	 *  the DSP is available. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_event = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_event)
			sync_init_event(msg_q->sync_event);
		else
			status = -ENOMEM;
	}

	/* Create a notification list for message ready notification. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (msg_q->ntfy_obj)
			ntfy_init(msg_q->ntfy_obj);
		else
			status = -ENOMEM;
	}

	/*  Create events that will be used to synchronize cleanup
	 *  when the object is deleted. sync_done will be set to
	 *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
	 *  will be set by the unblocked thread to signal that it
	 *  is unblocked and will no longer reference the object. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_done)
			sync_init_event(msg_q->sync_done);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
							GFP_KERNEL);
		if (msg_q->sync_done_ack)
			sync_init_event(msg_q->sync_done_ack);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Initialize message frames and put in appropriate queues */
		for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) {
			status = add_new_msg(hmsg_mgr->msg_free_list);
			if (DSP_SUCCEEDED(status)) {
				num_allocated++;
				status = add_new_msg(msg_q->msg_free_list);
			}
		}
		if (DSP_FAILED(status)) {
			/*  Stay inside CS to prevent others from taking any
			 *  of the newly allocated message frames. */
			delete_msg_queue(msg_q, num_allocated);
		} else {
			lst_put_tail(hmsg_mgr->queue_list,
				     (struct list_head *)msg_q);
			*phMsgQueue = msg_q;
			/* Signal that free frames are now available */
			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
				sync_set_event(hmsg_mgr->sync_event);

		}
		/* Exit critical section */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	} else {
		delete_msg_queue(msg_q, 0);
	}
func_end:
	return status;
}
Example #7
File: strm.c Project: 119-org/hi3518-osdrv
/*
 *  ======== strm_select ========
 *  Purpose:
 *      Selects a ready stream.
 */
int strm_select(struct strm_object **strm_tab, u32 strms,
		       u32 *pmask, u32 utimeout)
{
	u32 index;
	struct chnl_info chnl_info_obj;
	struct bridge_drv_interface *intf_fxns;
	struct sync_object **sync_events = NULL;
	u32 i;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(strm_tab != NULL);
	DBC_REQUIRE(pmask != NULL);
	DBC_REQUIRE(strms > 0);

	*pmask = 0;
	for (i = 0; i < strms; i++) {
		if (!strm_tab[i]) {
			status = -EFAULT;
			break;
		}
	}
	if (status)
		goto func_end;

	/* Determine which channels have IO ready */
	for (i = 0; i < strms; i++) {
		intf_fxns = strm_tab[i]->strm_mgr_obj->intf_fxns;
		status = (*intf_fxns->chnl_get_info) (strm_tab[i]->chnl_obj,
							  &chnl_info_obj);
		if (status) {
			break;
		} else {
			if (chnl_info_obj.cio_cs > 0)
				*pmask |= (1 << i);

		}
	}
	if (!status && utimeout > 0 && *pmask == 0) {
		/* Non-zero timeout */
		sync_events = kmalloc(strms * sizeof(struct sync_object *),
								GFP_KERNEL);

		if (sync_events == NULL) {
			status = -ENOMEM;
		} else {
			for (i = 0; i < strms; i++) {
				intf_fxns =
				    strm_tab[i]->strm_mgr_obj->intf_fxns;
				status = (*intf_fxns->chnl_get_info)
				    (strm_tab[i]->chnl_obj, &chnl_info_obj);
				if (status)
					break;
				else
					sync_events[i] =
					    chnl_info_obj.sync_event;

			}
		}
		if (!status) {
			status =
			    sync_wait_on_multiple_events(sync_events, strms,
							 utimeout, &index);
			if (!status) {
				/* Since we waited on the event, we have to
				 * reset it */
				sync_set_event(sync_events[index]);
				*pmask = 1 << index;
			}
		}
	}
func_end:
	kfree(sync_events);

	DBC_ENSURE((!status && (*pmask != 0 || utimeout == 0)) ||
		   (status && *pmask == 0));

	return status;
}
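
strm_select() reports readiness through *pmask, one bit per entry of strm_tab. A minimal, hypothetical caller sketch follows (not from the project): it assumes streams[] was opened elsewhere through the strm API and that the 100 ms timeout is arbitrary.

/* Hypothetical caller sketch for strm_select(); illustration only. */
static int service_ready_streams(struct strm_object **streams, u32 count)
{
	u32 mask = 0;
	u32 i;
	int status;

	/* Wait up to 100 ms (arbitrary) for any stream to become ready. */
	status = strm_select(streams, count, &mask, 100);
	if (status)
		return status;	/* a timeout surfaces as a sync-layer error */

	for (i = 0; i < count; i++) {
		if (!(mask & (1 << i)))
			continue;
		/* streams[i] has completed I/O queued; a real caller would
		 * reclaim it here (not shown). */
	}
	return 0;
}
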
Example #8
/*
 *  ======== bridge_msg_create_queue ========
 *      Create a msg_queue for sending/receiving messages to/from a node
 *      on the DSP.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
				u32 msgq_id, u32 max_msgs, void *arg)
{
	u32 i;
	u32 num_allocated = 0;
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || msgq == NULL)
		return -EFAULT;

	*msgq = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q)
		return -ENOMEM;

	msg_q->max_msgs = max_msgs;
	msg_q->msg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of Message frames for messages from the DSP */
	INIT_LIST_HEAD(&msg_q->msg_free_list);
	INIT_LIST_HEAD(&msg_q->msg_used_list);

	/*  Create event that will be signalled when a message from
	 *  the DSP is available. */
	msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_event) {
		status = -ENOMEM;
		goto out_err;

	}
	sync_init_event(msg_q->sync_event);

	/* Create a notification list for message ready notification. */
	msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!msg_q->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(msg_q->ntfy_obj);

	/*  Create events that will be used to synchronize cleanup
	 *  when the object is deleted. sync_done will be set to
	 *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
	 *  will be set by the unblocked thread to signal that it
	 *  is unblocked and will no longer reference the object. */
	msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done);

	msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done_ack) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done_ack);

	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* Initialize message frames and put in appropriate queues */
	for (i = 0; i < max_msgs && !status; i++) {
		status = add_new_msg(&hmsg_mgr->msg_free_list);
		if (!status) {
			num_allocated++;
			status = add_new_msg(&msg_q->msg_free_list);
		}
	}
	if (status) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		goto out_err;
	}

	list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
	*msgq = msg_q;
	/* Signal that free frames are now available */
	if (!list_empty(&hmsg_mgr->msg_free_list))
		sync_set_event(hmsg_mgr->sync_event);

	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	return 0;
out_err:
	delete_msg_queue(msg_q, num_allocated);
	return status;
}
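
Taken together, Examples #8, #4, #5 and #2 cover the life cycle of a node's message queue. The fragment below is a minimal, hypothetical sketch of that sequence against the newer prototypes (Examples #2 and #8); mgr and node_handle are assumed to come from the driver's setup code, and the queue id, the depth of 16 and the 100 ms timeouts are arbitrary.

/* Hypothetical lifecycle sketch (illustration only): create a queue,
 * exchange one message with the node, then tear the queue down. */
static int msg_queue_roundtrip(struct msg_mgr *mgr, void *node_handle)
{
	struct msg_queue *queue;
	struct dsp_msg out = { 0 };	/* payload left zeroed for brevity */
	struct dsp_msg in;
	int status;

	status = bridge_msg_create_queue(mgr, &queue, 1, 16, node_handle);
	if (status)
		return status;

	/* Send one message to the node (100 ms timeout, arbitrary)... */
	status = bridge_msg_put(queue, &out, 100);
	if (!status)
		/* ...and wait for a reply from the DSP. */
		status = bridge_msg_get(queue, &in, 100);

	/* Unblocks any pending callers and frees the queue's frames. */
	bridge_msg_delete_queue(queue);
	return status;
}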