/* * ======== bridge_chnl_cancel_io ======== * Return all I/O requests to the client which have not yet been * transferred. The channel's I/O completion object is * signalled, and all the I/O requests are queued as IOC's, with the * status field set to CHNL_IOCSTATCANCEL. * This call is typically used in abort situations, and is a prelude to * chnl_close(); */ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj) { int status = 0; struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; u32 chnl_id = -1; s8 chnl_mode; struct chnl_irp *chnl_packet_obj; struct chnl_mgr *chnl_mgr_obj = NULL; /* Check args: */ if (pchnl && pchnl->chnl_mgr_obj) { chnl_id = pchnl->chnl_id; chnl_mode = pchnl->chnl_mode; chnl_mgr_obj = pchnl->chnl_mgr_obj; } else { status = -EFAULT; } if (DSP_FAILED(status)) goto func_end; /* Mark this channel as cancelled, to prevent further IORequests or * IORequests or dispatching. */ spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock); pchnl->dw_state |= CHNL_STATECANCEL; if (LST_IS_EMPTY(pchnl->pio_requests)) goto func_cont; if (pchnl->chnl_type == CHNL_PCPY) { /* Indicate we have no more buffers available for transfer: */ if (CHNL_IS_INPUT(pchnl->chnl_mode)) { io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id); } else { /* Record that we no longer have output buffers * available: */ chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id); } } /* Move all IOR's to IOC queue: */ while (!LST_IS_EMPTY(pchnl->pio_requests)) { chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests); if (chnl_packet_obj) { chnl_packet_obj->byte_size = 0; chnl_packet_obj->status |= CHNL_IOCSTATCANCEL; lst_put_tail(pchnl->pio_completions, (struct list_head *)chnl_packet_obj); pchnl->cio_cs++; pchnl->cio_reqs--; DBC_ASSERT(pchnl->cio_reqs >= 0); } } func_cont: spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); func_end: return status; }
/*
 *  ======== bridge_chnl_add_io_req ========
 *  Enqueue an I/O request for data transfer on a channel to the DSP.
 *  The direction (mode) is specified in the channel object. Note the DSP
 *  address is specified for channels opened in direct I/O mode.
 *
 *  Returns 0 on success; -EFAULT on bad args, -EPERM for an EOS request on
 *  an input channel, -ECANCELED/-EPIPE for disallowed channel states,
 *  -ENOMEM on bounce-buffer allocation failure, -EINVAL when an output
 *  buffer exceeds the IO buffer size, -EIO when no free chirp is available.
 *
 *  FIX(review): the kernel bounce buffer (host_sys_buf) kmalloc'd for
 *  user-mode addresses was leaked whenever the request failed after
 *  allocation (-EINVAL oversized output, or -EIO no free chirp): ownership
 *  only transfers to the chirp when it is actually queued.  The buffer is
 *  now freed on the not-queued failure path.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *pHostBuf,
			   u32 byte_size, u32 buf_size,
			   OPTIONAL u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	/* True only when host_sys_buf was kmalloc'd as a bounce buffer for a
	 * user-mode address; the queued chirp then owns it. */
	bool host_buf_allocated = false;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);
	/* Validate args: */
	if (pHostBuf == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
		/* A zero-length (EOS) request is only meaningful on output. */
		status = -EPERM;
	} else {
		/* Check the channel state: only queue chirp if channel state
		 * allows */
		dw_state = pchnl->dw_state;
		if (dw_state != CHNL_STATEREADY) {
			if (dw_state & CHNL_STATECANCEL)
				status = -ECANCELED;
			else if ((dw_state & CHNL_STATEEOS)
				 && CHNL_IS_OUTPUT(pchnl->chnl_mode))
				status = -EPIPE;
			else
				/* No other possible states left: */
				DBC_ASSERT(0);
		}
	}
	if (DSP_FAILED(status))
		goto func_end;

	chnl_mgr_obj = pchnl->chnl_mgr_obj;
	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt || !chnl_mgr_obj)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && pHostBuf) {
		if (!(pHostBuf < (void *)USERMODE_ADDR)) {
			/* Already a kernel address; use the caller's buffer
			 * directly. */
			host_sys_buf = pHostBuf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		host_buf_allocated = true;
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, pHostBuf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				status = -EFAULT;
				goto func_end;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size >
			    io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
				status = -EINVAL;
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Get a free chirp: */
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
		if (chnl_packet_obj == NULL)
			status = -EIO;
	}
	if (DSP_SUCCEEDED(status)) {
		/* Enqueue the chirp on the chnl's IORequest queue: */
		chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		    pHostBuf;
		if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
			chnl_packet_obj->host_sys_buf = host_sys_buf;

		/*
		 * Note: for dma chans dw_dsp_addr contains dsp address
		 * of SM buffer.
		 */
		DBC_ASSERT(chnl_mgr_obj->word_size != 0);
		/* DSP address */
		chnl_packet_obj->dsp_tx_addr =
		    dw_dsp_addr / chnl_mgr_obj->word_size;
		chnl_packet_obj->byte_size = byte_size;
		chnl_packet_obj->buf_size = buf_size;
		/* Only valid for output channel */
		chnl_packet_obj->dw_arg = dw_arg;
		chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
					   CHNL_IOCSTATCOMPLETE);
		lst_put_tail(pchnl->pio_requests,
			     (struct list_head *)chnl_packet_obj);
		pchnl->cio_reqs++;
		DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
		/* If end of stream, update the channel state to prevent
		 * more IOR's: */
		if (is_eos)
			pchnl->dw_state |= CHNL_STATEEOS;

		/* Legacy DSM Processor-Copy */
		DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
		/* Request IO from the DSP */
		io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
				(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
				 IO_OUTPUT), &mb_val);
		sched_dpc = true;
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (DSP_FAILED(status) && host_buf_allocated) {
		/* The chirp was never queued, so nothing else will release
		 * the bounce buffer; free it here to avoid a leak. */
		kfree(host_sys_buf);
		host_sys_buf = NULL;
	}
	if (mb_val != 0)
		io_intr_dsp2(chnl_mgr_obj->hio_mgr, mb_val);

	/* Schedule a DPC, to do the actual data transfer: */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->hio_mgr);

func_end:
	return status;
}
/*
 *  ======== bridge_chnl_get_ioc ========
 *  Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *  completion record, which contains information about the completed
 *  I/O request.
 *  Note: Ensures Channel Invariant (see notes above).
 *
 *  dwTimeOut: CHNL_IOCNOWAIT (poll), CHNL_IOCINFINITE (block forever),
 *  or a timeout in the units expected by sync_wait_on_event().
 *  On success *pIOC is filled in; on a timeout/cancel the status field of
 *  *pIOC carries CHNL_IOCSTATTIMEOUT/CHNL_IOCSTATCANCEL and no chirp is
 *  dequeued.  Returns 0, or a negative errno (-EFAULT bad args/context,
 *  -EREMOTEIO for an empty queue in no-wait mode).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
			OUT struct chnl_ioc *pIOC)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;	/* bounce buffer owned by the dequeued chirp */
	struct wmd_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (pIOC == NULL) {
		status = -EFAULT;
	} else if (!pchnl) {
		status = -EFAULT;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		/* Polling mode: fail immediately if nothing has completed. */
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;
	}

	dev_obj = dev_get_first();
	dev_get_wmd_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	/* In blocking mode with an empty queue, wait for the DSP to signal
	 * a completion (or for the wait to time out / be interrupted). */
	if (dwTimeOut != CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, dwTimeOut);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq: lock + mailbox-IRQ disable protects the
	 * completion list against concurrent DPC/PCPY activity. */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set pIOC; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update pIOC from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/* If this is a zero-copy channel, then set IOC's pbuf
			 * to the DSP's address. This DSP address will get
			 * translated to user's virtual addr later. */
			{
				host_sys_buf = chnl_packet_obj->host_sys_buf;
				ioc.pbuf = chnl_packet_obj->host_user_buf;
			}
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			/* List emptied between the assert and the get; report
			 * an empty completion. */
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		/* Nothing dequeued (timeout/cancel): zero the payload. */
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/* Since DSPStream_Reclaim() does not take a timeout
		 * parameter, we pass the stream's timeout value to
		 * bridge_chnl_get_ioc. We cannot determine whether or not
		 * we have waited in User mode. Since the stream's timeout
		 * value may be non-zero, we still have to set the event.
		 * Therefore, this optimization is taken out.
		 *
		 * if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *	... ensure event is set..
		 *	sync_set_event(pchnl->sync_event);
		 * }
		 */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	/* For PCPY channels (id > 1) the chirp may carry a kernel bounce
	 * buffer for a user-mode address: copy it back (input channels only)
	 * and release it. */
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/*host_user_buf */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			/* A dying process may legitimately fail the copy;
			 * don't report that as an error. */
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	return status;
}
static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, u8 io_mode) { struct chnl_mgr *chnl_mgr_obj; struct shm *sm; u32 chnl_id; u32 bytes; struct chnl_irp *chnl_packet_obj = NULL; u32 dw_arg; bool clear_chnl = false; bool notify_client = false; sm = pio_mgr->shared_mem; chnl_mgr_obj = pio_mgr->chnl_mgr; if (!sm->input_full) goto func_end; bytes = sm->input_size * chnl_mgr_obj->word_size; chnl_id = sm->input_id; dw_arg = sm->arg; if (chnl_id >= CHNL_MAXCHANNELS) { goto func_end; } pchnl = chnl_mgr_obj->channels[chnl_id]; if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) { if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) { if (!list_empty(&pchnl->io_requests)) { if (!pchnl->cio_reqs) goto func_end; chnl_packet_obj = list_first_entry( &pchnl->io_requests, struct chnl_irp, link); list_del(&chnl_packet_obj->link); pchnl->cio_reqs--; bytes = min(bytes, chnl_packet_obj->byte_size); memcpy(chnl_packet_obj->host_sys_buf, pio_mgr->input, bytes); pchnl->bytes_moved += bytes; chnl_packet_obj->byte_size = bytes; chnl_packet_obj->arg = dw_arg; chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE; if (bytes == 0) { if (pchnl->state & CHNL_STATEEOS) goto func_end; chnl_packet_obj->status |= CHNL_IOCSTATEOS; pchnl->state |= CHNL_STATEEOS; ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE); } if (list_empty(&pchnl->io_requests)) set_chnl_free(sm, pchnl->chnl_id); clear_chnl = true; notify_client = true; } else { clear_chnl = true; } } else {