/*
 * Stop reception on the port: mask the RX interrupt and record the
 * receiver as disabled.  A no-op when reception is already stopped.
 */
static void ks8695uart_stop_rx(struct uart_port *port)
{
	if (!rx_enabled(port))
		return;

	disable_irq(KS8695_IRQ_UART_RX);
	rx_enable(port, 0);
}
/**
 * Force the receiver on for the given UART instance.
 *
 * Idempotent: if reception is already enabled in the driver's control
 * block the call returns immediately without touching the hardware.
 */
void nrfx_uart_rx_enable(nrfx_uart_t const * p_instance)
{
    if (m_cb[p_instance->drv_inst_idx].rx_enabled)
    {
        return;
    }

    rx_enable(p_instance);
    m_cb[p_instance->drv_inst_idx].rx_enabled = true;
}
/**
 * Prepare reception of query hit data by building an appropriate RX stack.
 *
 * The stack is layered bottom-up: a raw link layer first, then optionally
 * a de-chunking layer (BH_DL_CHUNKED) and an inflating layer (BH_DL_INFLATE)
 * on top.  Data therefore flows link -> chunk -> inflate -> browse_data_ind.
 *
 * @return TRUE if we may continue with the download, FALSE if the search
 * was already closed in the GUI.
 */
gboolean
browse_host_dl_receive(
	struct browse_ctx *bc, gnet_host_t *host, wrap_io_t *wio,
	const char *vendor, guint32 flags)
{
	g_assert(bc != NULL);

	/* Bail out early if the GUI already closed the associated search. */
	if (bc->closed)
		return FALSE;

	gnet_host_copy(&bc->host, host);
	bc->vendor = atom_str_get(vendor);	/* atom: freed via atom_str_free elsewhere */

	/*
	 * Freeing of the RX stack must be asynchronous: each time we establish
	 * a new connection, dismantle the previous stack.  Otherwise the RX
	 * stack will be freed when the corresponding download structure is
	 * reclaimed.
	 */

	if (bc->rx != NULL) {
		rx_free(bc->rx);
		bc->rx = NULL;
	}

	/* Bottom layer: raw link I/O, bandwidth-scheduled by peer address. */
	{
		struct rx_link_args args;

		args.cb = &browse_rx_link_cb;
		args.bws = bsched_in_select_by_addr(gnet_host_get_addr(&bc->host));
		args.wio = wio;

		bc->rx = rx_make(bc, &bc->host, rx_link_get_ops(), &args);
	}

	/* Optional layer: HTTP chunked transfer decoding. */
	if (flags & BH_DL_CHUNKED) {
		struct rx_chunk_args args;

		args.cb = &browse_rx_chunk_cb;

		bc->rx = rx_make_above(bc->rx, rx_chunk_get_ops(), &args);
	}

	/* Optional layer: on-the-fly decompression of deflated payload. */
	if (flags & BH_DL_INFLATE) {
		struct rx_inflate_args args;

		args.cb = &browse_rx_inflate_cb;

		bc->rx = rx_make_above(bc->rx, rx_inflate_get_ops(), &args);
	}

	/* Topmost layer delivers decoded data to browse_data_ind. */
	rx_set_data_ind(bc->rx, browse_data_ind);
	rx_enable(bc->rx);

	return TRUE;
}
/*
 * Bring the port up: enable TX/RX/modem-status in hardware, then claim
 * the four UART interrupt lines.  On any request_irq() failure the
 * previously acquired IRQs are released in reverse order via the goto
 * unwind chain below.
 *
 * Returns 0 on success, or the negative errno from the failing
 * request_irq() call.
 */
static int ks8695uart_startup(struct uart_port *port)
{
	int retval;

	/* TX IRQ is valid but must not auto-enable; it is unmasked on demand. */
	set_irq_flags(KS8695_IRQ_UART_TX, IRQF_VALID | IRQF_NOAUTOEN);
	tx_enable(port, 0);
	rx_enable(port, 1);
	ms_enable(port, 1);

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(KS8695_IRQ_UART_TX, ks8695uart_tx_chars,
			IRQF_DISABLED, "UART TX", port);
	if (retval)
		goto err_tx;

	retval = request_irq(KS8695_IRQ_UART_RX, ks8695uart_rx_chars,
			IRQF_DISABLED, "UART RX", port);
	if (retval)
		goto err_rx;

	/*
	 * NOTE(review): the line-status IRQ shares ks8695uart_rx_chars as its
	 * handler — presumably deliberate (status errors surface through the
	 * RX path), but worth confirming against the datasheet.
	 */
	retval = request_irq(KS8695_IRQ_UART_LINE_STATUS, ks8695uart_rx_chars,
			IRQF_DISABLED, "UART LineStatus", port);
	if (retval)
		goto err_ls;

	retval = request_irq(KS8695_IRQ_UART_MODEM_STATUS, ks8695uart_modem_status,
			IRQF_DISABLED, "UART ModemStatus", port);
	if (retval)
		goto err_ms;

	return 0;

	/* Unwind: free only the IRQs acquired before the failure, in reverse. */
err_ms:
	free_irq(KS8695_IRQ_UART_LINE_STATUS, port);
err_ls:
	free_irq(KS8695_IRQ_UART_RX, port);
err_rx:
	free_irq(KS8695_IRQ_UART_TX, port);
err_tx:
	return retval;
}
/**
 * Receive `length` bytes into `p_data`.
 *
 * Two modes, selected by whether an event handler was registered at init:
 *  - handler == NULL: blocking — poll the RXDRDY/RXTO/ERROR events until
 *    the buffer is filled or an error/timeout occurs.
 *  - handler != NULL: non-blocking — store the buffer (primary, or secondary
 *    double-buffer slot if a transfer is already in flight) and let the
 *    RXDRDY/ERROR interrupts drive the transfer.
 *
 * Returns NRFX_SUCCESS, NRFX_ERROR_BUSY (both buffer slots occupied),
 * NRFX_ERROR_INTERNAL (hardware error event in blocking mode), or
 * NRFX_ERROR_FORBIDDEN (RX timeout event in blocking mode).
 */
nrfx_err_t nrfx_uart_rx(nrfx_uart_t const * p_instance,
                        uint8_t *           p_data,
                        size_t              length)
{
    uart_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];

    NRFX_ASSERT(m_cb[p_instance->drv_inst_idx].state == NRFX_DRV_STATE_INITIALIZED);
    NRFX_ASSERT(p_data);
    NRFX_ASSERT(length > 0);

    nrfx_err_t err_code;

    bool second_buffer = false;

    /* Mask RX interrupts while inspecting/mutating the buffer bookkeeping
     * so the ISR cannot race us. */
    if (p_cb->handler)
    {
        nrf_uart_int_disable(p_instance->p_reg,
                             NRF_UART_INT_MASK_RXDRDY | NRF_UART_INT_MASK_ERROR);
    }
    if (p_cb->rx_buffer_length != 0)
    {
        /* Primary buffer busy; if the secondary slot is also taken we are full. */
        if (p_cb->rx_secondary_buffer_length != 0)
        {
            if (p_cb->handler)
            {
                nrf_uart_int_enable(p_instance->p_reg,
                                    NRF_UART_INT_MASK_RXDRDY | NRF_UART_INT_MASK_ERROR);
            }
            err_code = NRFX_ERROR_BUSY;
            NRFX_LOG_WARNING("Function: %s, error code: %s.",
                             __func__,
                             NRFX_LOG_ERROR_STRING_GET(err_code));
            return err_code;
        }
        second_buffer = true;
    }

    if (!second_buffer)
    {
        /* Fresh transfer: install primary buffer and reset counters. */
        p_cb->rx_buffer_length           = length;
        p_cb->p_rx_buffer                = p_data;
        p_cb->rx_counter                 = 0;
        p_cb->rx_secondary_buffer_length = 0;
    }
    else
    {
        /* Queue as follow-up buffer behind the in-flight transfer. */
        p_cb->p_rx_secondary_buffer      = p_data;
        p_cb->rx_secondary_buffer_length = length;
    }

    NRFX_LOG_INFO("Transfer rx_len: %d.", length);

    /* Start the receiver only for a new transfer on a disabled receiver;
     * a second buffer rides on the already-running transfer. */
    if ((!p_cb->rx_enabled) && (!second_buffer))
    {
        rx_enable(p_instance);
    }

    if (p_cb->handler == NULL)
    {
        /* Blocking mode: poll the event flags until done. */
        nrf_uart_event_clear(p_instance->p_reg, NRF_UART_EVENT_RXTO);

        bool rxrdy;
        bool rxto;
        bool error;
        do
        {
            /* Spin until at least one of RXDRDY / RXTO / ERROR fires. */
            do
            {
                error = nrf_uart_event_check(p_instance->p_reg, NRF_UART_EVENT_ERROR);
                rxrdy = nrf_uart_event_check(p_instance->p_reg, NRF_UART_EVENT_RXDRDY);
                rxto  = nrf_uart_event_check(p_instance->p_reg, NRF_UART_EVENT_RXTO);
            } while ((!rxrdy) && (!rxto) && (!error));

            if (error || rxto)
            {
                break;
            }
            rx_byte(p_instance->p_reg, p_cb);
        } while (p_cb->rx_buffer_length > p_cb->rx_counter);

        /* Transfer over (complete or aborted): release the primary slot. */
        p_cb->rx_buffer_length = 0;

        if (error)
        {
            err_code = NRFX_ERROR_INTERNAL;
            NRFX_LOG_WARNING("Function: %s, error code: %s.",
                             __func__,
                             NRFX_LOG_ERROR_STRING_GET(err_code));
            return err_code;
        }

        if (rxto)
        {
            err_code = NRFX_ERROR_FORBIDDEN;
            NRFX_LOG_WARNING("Function: %s, error code: %s.",
                             __func__,
                             NRFX_LOG_ERROR_STRING_GET(err_code));
            return err_code;
        }

        if (p_cb->rx_enabled)
        {
            /* Receiver was forced on via nrfx_uart_rx_enable(): keep it
             * running by re-triggering STARTRX instead of stopping. */
            nrf_uart_task_trigger(p_instance->p_reg, NRF_UART_TASK_STARTRX);
        }
        else
        {
            // Skip stopping RX if driver is forced to be enabled.
            nrf_uart_task_trigger(p_instance->p_reg, NRF_UART_TASK_STOPRX);
        }
    }
    else
    {
        /* Non-blocking mode: re-arm the interrupts and return immediately. */
        nrf_uart_int_enable(p_instance->p_reg,
                            NRF_UART_INT_MASK_RXDRDY | NRF_UART_INT_MASK_ERROR);
    }
    err_code = NRFX_SUCCESS;
    NRFX_LOG_INFO("Function: %s, error code: %s.",
                  __func__,
                  NRFX_LOG_ERROR_STRING_GET(err_code));
    return err_code;
}
/**
 * Prepare reception of THEX data by building an appropriate RX stack.
 *
 * The stack is layered bottom-up: a raw link layer first, then optionally
 * a de-chunking layer (THEX_DOWNLOAD_F_CHUNKED) and an inflating layer
 * (THEX_DOWNLOAD_F_INFLATE) on top, delivering to thex_download_data_ind.
 *
 * @return TRUE if we may continue with the download.
 */
bool
thex_download_receive(struct thex_download *ctx,
	filesize_t content_length,
	gnet_host_t *host, struct wrap_io *wio, uint32 flags)
{
	g_assert(ctx != NULL);

	gnet_host_copy(&ctx->host, host);

	/*
	 * Freeing of the RX stack must be asynchronous: each time we establish
	 * a new connection, dismantle the previous stack.  Otherwise the RX
	 * stack will be freed when the corresponding download structure is
	 * reclaimed.
	 */

	if (ctx->rx != NULL) {
		rx_free(ctx->rx);
		ctx->rx = NULL;
	}

	/*
	 * If there is a Content-Length indication in the HTTP reply, it is
	 * supplied here and will be used as a limit of the data we'll read.
	 *
	 * If there was none (for instance if the output is chunked), then 0
	 * is given and we'll use a hardwired maximum.
	 */

	/* Reject lengths that do not fit in a size_t on this platform. */
	if (content_length > MAX_INT_VAL(size_t))
		return FALSE;
	ctx->max_size = content_length
		? (size_t) content_length
		: THEX_DOWNLOAD_MAX_SIZE;

	/* Bottom layer: raw link I/O, bandwidth-scheduled by peer address. */
	{
		struct rx_link_args args;

		args.cb = &thex_rx_link_cb;
		args.bws = bsched_in_select_by_addr(gnet_host_get_addr(&ctx->host));
		args.wio = wio;

		ctx->rx = rx_make(ctx, &ctx->host, rx_link_get_ops(), &args);
	}

	/* Optional layer: HTTP chunked transfer decoding. */
	if (flags & THEX_DOWNLOAD_F_CHUNKED) {
		struct rx_chunk_args args;

		args.cb = &thex_rx_chunk_cb;

		ctx->rx = rx_make_above(ctx->rx, rx_chunk_get_ops(), &args);
	}

	/* Optional layer: on-the-fly decompression of deflated payload. */
	if (flags & THEX_DOWNLOAD_F_INFLATE) {
		struct rx_inflate_args args;

		args.cb = &thex_rx_inflate_cb;

		ctx->rx = rx_make_above(ctx->rx, rx_inflate_get_ops(), &args);
	}

	/* Topmost layer delivers decoded data to thex_download_data_ind. */
	rx_set_data_ind(ctx->rx, thex_download_data_ind);
	rx_enable(ctx->rx);

	return TRUE;
}