/* Write a single character into the circular buffer.
 *
 * Mutates the buffer inside a critical section so it is safe against a
 * concurrent reader.  If data is present afterwards, the buffer's event is
 * signaled using the caller's reschedule policy.
 *
 * Returns the number of characters written: 1 on success, 0 if the buffer
 * was full.
 */
size_t cbuf_write_char(cbuf_t *cbuf, char c, bool canreschedule) {
    DEBUG_ASSERT(cbuf);

    enter_critical_section();

    size_t written = 0;
    if (cbuf_space_avail(cbuf) > 0) {
        cbuf->buf[cbuf->head] = c;
        cbuf->head = INC_POINTER(cbuf, cbuf->head, 1);
        written = 1;

        /* Buffer is non-empty now; wake any waiting reader. */
        if (cbuf->head != cbuf->tail) {
            event_signal(&cbuf->event, canreschedule);
        }
    }

    exit_critical_section();

    return written;
}
// Append one character to the circular buffer.
//
// Buffer state is mutated while holding the spinlock; the event is signaled
// only after the lock has been released, so waiters are never woken while the
// lock is still held.
//
// Returns 1 if the character was stored, 0 if the buffer had no room.
size_t cbuf_write_char(cbuf_t* cbuf, char c) {
    DEBUG_ASSERT(cbuf);

    size_t written = 0;
    {
        AutoSpinLock guard(&cbuf->lock);
        if (cbuf_space_avail(cbuf) > 0) {
            cbuf->buf[cbuf->head] = c;
            cbuf->head = inc_pointer(cbuf, cbuf->head, 1);
            written = 1;
        }
    }

    // Signal readers outside the lock.
    if (written > 0) {
        event_signal(&cbuf->event, true);
    }
    return written;
}
/* Copy up to |len| bytes from |_buf| into the circular buffer.
 *
 * Runs inside a critical section.  Copies in at most two contiguous chunks
 * (up to the end of the backing array, then from the start), stopping early
 * when the buffer fills.  Signals the buffer's event if data is present
 * afterwards, using the caller's reschedule policy.
 *
 * Returns the number of bytes actually written (may be less than |len|).
 */
size_t cbuf_write(cbuf_t *cbuf, const void *_buf, size_t len, bool canreschedule) {
    const char *buf = (const char *)_buf;

    LTRACEF("len %zd\n", len);

    DEBUG_ASSERT(cbuf);
    DEBUG_ASSERT(_buf);
    DEBUG_ASSERT(len < valpow2(cbuf->len_pow2));

    enter_critical_section();

    size_t write_len;
    size_t pos = 0;
    while (pos < len && cbuf_space_avail(cbuf) > 0) {
        if (cbuf->head >= cbuf->tail) {
            if (cbuf->tail == 0) {
                // Special case - if tail is at position 0, we can't write all
                // the way to the end of the backing array: head would wrap to
                // 0 as well, head == tail would hold, and the buffer would be
                // treated as empty, silently discarding its entire contents.
                write_len = MIN(valpow2(cbuf->len_pow2) - cbuf->head - 1, len - pos);
            } else {
                // Write contiguously up to the end of the backing array.
                write_len = MIN(valpow2(cbuf->len_pow2) - cbuf->head, len - pos);
            }
        } else {
            // Write from head up to (but not including) tail.
            write_len = MIN(cbuf->tail - cbuf->head - 1, len - pos);
        }

        // if it's full, abort and return how much we've written
        if (write_len == 0) {
            break;
        }

        memcpy(cbuf->buf + cbuf->head, buf + pos, write_len);

        cbuf->head = INC_POINTER(cbuf, cbuf->head, write_len);
        pos += write_len;
    }

    if (cbuf->head != cbuf->tail)
        event_signal(&cbuf->event, canreschedule);

    exit_critical_section();

    return pos;
}
/* UART interrupt handler: drain the RX FIFO into the software buffer and
 * wake any thread blocked waiting for TX space. */
static interrupt_eoi uart_irq_handler(void* arg) {
    /* read interrupt status and mask */
    while ((UARTREG(MX8_USR1) & USR1_RRDY)) {
        /* Stop draining if the software buffer is full; remaining bytes stay
         * in the hardware FIFO (and may be lost if it overruns). */
        if (cbuf_space_avail(&uart_rx_buf) == 0) {
            break;
        }
        char c = UARTREG(MX8_URXD) & 0xFF;
        cbuf_write_char(&uart_rx_buf, c);
    }

    /* Signal if anyone is waiting to TX */
    if (UARTREG(MX8_UCR1) & UCR1_TRDYEN) {
        spin_lock(&uart_spinlock);
        /* NOTE(review): UTS_TXFULL is tested against the USR2 register here,
         * but the bit name suggests it belongs to the UTS register — verify
         * the register/bit pairing against the i.MX8 reference manual. */
        if (!(UARTREG(MX8_USR2) & UTS_TXFULL)) {
            // signal
            event_signal(&uart_dputc_event, true);
        }
        spin_unlock(&uart_spinlock);
    }

    return IRQ_EOI_DEACTIVATE;
}