externC cyg_bool cyg_drv_cond_wait( cyg_drv_cond_t *cond )
{
    CYG_REPORT_FUNCTION();

    CYG_ASSERT( cond->mutex != NULL, "Uninitialized condition variable");
    CYG_ASSERT( cond->mutex->lock, "Mutex not locked");

    cyg_drv_dsr_lock();

    cond->wait = 1;

    while( cond->wait == 1 )
    {
        // While looping we call call_dsrs() to service any DSRs that
        // get posted. One of these will make the call to cond_signal
        // to break us out of this loop. If we do not have the DSR
        // lock claimed, then a race condition could occur and keep us
        // stuck here forever.
        call_dsrs();
    }

    cyg_drv_dsr_unlock();

    CYG_REPORT_RETURN();
    return true;
}
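//===========================================================================
// The assertions in cyg_drv_cond_wait() above document its calling contract:
// the associated mutex must already be locked, and the caller is expected to
// hold the DSR lock around the wait loop so that a DSR cannot signal between
// testing the completion flag and the wait itself. A minimal sketch of that
// thread-side pattern follows; 'example_drv', 'done', 'signal' and 'lock'
// are illustrative names, not taken from any particular driver. The same
// shape appears below in usbs_devtab_cwrite() and the DSPI transfer code.
//===========================================================================
#include <cyg/hal/drv_api.h>

// Hypothetical driver-private state used only for this sketch.
typedef struct example_drv {
    cyg_drv_mutex_t lock;
    cyg_drv_cond_t  signal;
    volatile int    done;   // set by the driver's DSR when the operation completes
} example_drv;

static void example_wait_for_completion(example_drv *drv)
{
    cyg_drv_mutex_lock(&drv->lock);      // serialize against other client threads
    cyg_drv_dsr_lock();                  // keep the DSR from signalling too early
    while (!drv->done) {
        cyg_drv_cond_wait(&drv->signal); // lock is released while the thread sleeps
    }
    cyg_drv_dsr_unlock();
    cyg_drv_mutex_unlock(&drv->lock);
}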
//===========================================================================
// Callback for received events
//===========================================================================
static void can_rcv_event(can_channel *chan, void *pdata)
{
    can_cbuf_t      *cbuf   = &chan->in_cbuf;
    CYG_CAN_EVENT_T *prxbuf = (CYG_CAN_EVENT_T *)cbuf->pdata;
#ifdef CYGOPT_IO_CAN_SUPPORT_CALLBACK
    cyg_uint16       flags;
#endif

    //
    // cbuf is a ring buffer - if the buffer is full we overwrite the oldest
    // message in the buffer, so the user always sees the most recent state of
    // the external hardware connected to the CAN bus. We need to call
    // cyg_drv_dsr_lock() here because this function may be called from
    // different message box interrupts, so access to the buffer has to be
    // protected.
    //
    cyg_drv_dsr_lock();

    prxbuf[cbuf->put].flags = 0; // clear flags because it is a new event
    if (chan->funs->getevent(chan, &prxbuf[cbuf->put], pdata))
    {
        if (cbuf->data_cnt < cbuf->len)
        {
            cbuf->data_cnt++;
        }
        else
        {
            //
            // The buffer is full but a new message arrived. We store this new
            // message and overwrite the oldest one, but at least we tell the
            // user that there is an overrun in the RX queue.
            //
            prxbuf[cbuf->put].flags |= CYGNUM_CAN_EVENT_OVERRUN_RX;
            cbuf->get = (cbuf->get + 1) % cbuf->len;
        }

#ifdef CYGOPT_IO_CAN_SUPPORT_CALLBACK
        flags = prxbuf[cbuf->put].flags;
#endif

        cbuf->put = (cbuf->put + 1) % cbuf->len;

        if (cbuf->waiting)
        {
            cbuf->waiting = false;
            cyg_drv_cond_broadcast(&cbuf->wait);
        }

#ifdef CYGOPT_IO_CAN_SUPPORT_CALLBACK
        // Call the application callback function if any of the flagged
        // events are unmasked.
        if ((flags & chan->callback_cfg.flag_mask) &&
            (chan->callback_cfg.callback_func))
        {
            chan->callback_cfg.callback_func(flags, chan->callback_cfg.data);
        }
#endif
    }

    cyg_drv_dsr_unlock();
}
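//===========================================================================
// The callback invoked above is the one an application installs through the
// CYG_IO_SET_CONFIG_CAN_CALLBACK key handled in can_set_config() further
// down. A sketch of installing such a callback follows; the handle comes
// from cyg_io_lookup(), and the callback parameter types and header names
// are assumptions based on the call site in can_rcv_event().
//===========================================================================
#include <cyg/io/io.h>
#include <cyg/io/canio.h>

// Runs in DSR context - keep it short (e.g. post a semaphore or set a flag).
// Parameter types assumed from the call in can_rcv_event() above.
static void my_can_callback(cyg_uint16 flags, CYG_ADDRWORD data)
{
    (void) flags;
    (void) data;
}

static void install_can_callback(cyg_io_handle_t hCAN)
{
    cyg_can_callback_cfg cfg;
    cyg_uint32 len = sizeof(cfg);

    cfg.flag_mask     = CYGNUM_CAN_EVENT_OVERRUN_RX;  // events of interest
    cfg.callback_func = my_can_callback;
    cfg.data          = 0;

    cyg_io_set_config(hCAN, CYG_IO_SET_CONFIG_CAN_CALLBACK, &cfg, &len);
}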
static void usbs_at91_endpoint_set_halted (usbs_rx_endpoint * pep,
                                           cyg_bool new_value)
{
    int epn = usbs_at91_pep_to_number(pep);
    cyg_addrword_t pCSR = pCSRn(epn);

    cyg_drv_dsr_lock ();

    if (pep->halted != new_value) {
        /* There is something to do */
        pep->halted = new_value;

        if (new_value && BITS_ARE_SET (pIMR, 1 << epn)) {
            /* Ready to transmit */
            if (pep->complete_fn) {
                (*pep->complete_fn) (pep->complete_data, -EAGAIN);
            }
            usbs_at91_endpoint_interrupt_enable (epn, false);
            SET_BITS (pCSR, AT91_UDP_CSR_FORCESTALL);
        } else {
            CLEAR_BITS (pCSR, AT91_UDP_CSR_FORCESTALL);
        }
    }
    cyg_drv_dsr_unlock ();
}
// DSPI DSR
static void dspi_DSR(cyg_vector_t vector, cyg_ucount32 count, cyg_addrword_t data)
{
    cyg_spi_freescale_dspi_bus_t* dspi_bus = (cyg_spi_freescale_dspi_bus_t*) data;

    cyg_drv_dsr_lock();
    cyg_drv_cond_signal(&dspi_bus->transfer_done);
    cyg_drv_dsr_unlock();
}
//--------------------------------------
// Disable the transmitter on the device
//--------------------------------------
static void mpc555_serial_stop_xmit(serial_channel * chan)
{
    mpc555_serial_info * mpc555_chan = (mpc555_serial_info *)chan->dev_priv;

    cyg_drv_dsr_lock();
    mpc555_chan->tx_interrupt_enable = false;
    cyg_drv_interrupt_mask(mpc555_chan->tx_interrupt_num);
    cyg_drv_dsr_unlock();
}
// Enable the transmitter (interrupt) on the device
static void mpc8xxx_sxx_serial_start_xmit(serial_channel *chan)
{
    mpc8xxx_sxx_serial_info *smc_chan = (mpc8xxx_sxx_serial_info *)chan->dev_priv;

    cyg_drv_dsr_lock();
    if (smc_chan->txbd->length == 0) {
        // See if there is anything to put in this buffer, just to get it going
        (chan->callbacks->xmt_char)(chan);
    }
    if (smc_chan->txbd->length != 0) {
        // Make sure it gets started
        mpc8xxx_sxx_serial_flush(smc_chan);
    }
    cyg_drv_dsr_unlock();
}
Cyg_ErrNo
usbs_devtab_cwrite(cyg_io_handle_t handle, const void* buf, cyg_uint32* size)
{
    usbs_callback_data  wait;
    cyg_devtab_entry_t* devtab_entry;
    usbs_tx_endpoint*   endpoint;
    int                 result = ENOERR;

    CYG_REPORT_FUNCTION();

    wait.completed = 0;
    cyg_drv_mutex_init(&wait.lock);
    cyg_drv_cond_init(&wait.signal, &wait.lock);

    devtab_entry = (cyg_devtab_entry_t*) handle;
    CYG_CHECK_DATA_PTR( devtab_entry, "A valid endpoint must be supplied");
    endpoint = (usbs_tx_endpoint*) devtab_entry->priv;
    CYG_CHECK_DATA_PTR( endpoint, "The handle must correspond to a USB endpoint");
    CYG_CHECK_FUNC_PTR( endpoint->start_tx_fn,
                        "The endpoint must have a start_tx function");

    endpoint->buffer        = (unsigned char*) buf;
    endpoint->buffer_size   = (int) *size;
    endpoint->complete_fn   = &usbs_devtab_callback;
    endpoint->complete_data = (void*) &wait;
    (*endpoint->start_tx_fn)(endpoint);

    cyg_drv_mutex_lock(&wait.lock);
    cyg_drv_dsr_lock();
    while (!wait.completed) {
        cyg_drv_cond_wait(&wait.signal);
    }
    cyg_drv_dsr_unlock();
    cyg_drv_mutex_unlock(&wait.lock);

    if (wait.result < 0) {
        result = wait.result;
    } else {
        *size = wait.result;
    }

    cyg_drv_cond_destroy(&wait.signal);
    cyg_drv_mutex_destroy(&wait.lock);
    CYG_REPORT_RETURN();
    return result;
}
void
usbs_eth_start_rx(usbs_eth* eth, unsigned char* buf,
                  void (*callback_fn)(usbs_eth*, void*, int),
                  void* callback_arg)
{
    eth->rx_callback_fn  = callback_fn;
    eth->rx_callback_arg = callback_arg;

    cyg_drv_dsr_lock();
    if (eth->host_up) {
        eth->rx_endpoint->buffer        = buf;
        eth->rx_endpoint->buffer_size   = CYGNUM_USBS_ETH_RXSIZE;
        eth->rx_endpoint->complete_fn   = &usbs_eth_rx_callback;
        eth->rx_endpoint->complete_data = (void*) eth;
        (*(eth->rx_endpoint->start_rx_fn))(eth->rx_endpoint);
    } else {
        CYG_ASSERT( (void*) 0 == eth->rx_pending_buf,
                    "No RX operation should be in progress");
        eth->rx_pending_buf = buf;
    }
    cyg_drv_dsr_unlock();
}
void
usbs_eth_start_tx(usbs_eth* eth, unsigned char* buf,
                  void (*callback_fn)(usbs_eth*, void*, int),
                  void* callback_arg)
{
    int size;
    cyg_bool address_ok = false;
    static const unsigned char broadcast_mac[6] = {
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
    };

    size = buf[0] + (buf[1] << 8);
    CYG_ASSERT( (size < 0) ||
                ((size >= CYGNUM_USBS_ETH_MIN_FRAME_SIZE) &&
                 (size <= CYGNUM_USBS_ETH_MAX_FRAME_SIZE)),
                "ethernet frame size constraints must be observed");

    if ((0 == memcmp(buf + 2, eth->host_MAC, 6)) ||
        (0 == memcmp(buf + 2, broadcast_mac, 6))) {
        address_ok = true;
    }

    // The following checks involve data that can change as a result
    // of control operations, so it is necessary to synchronize with
    // those. The control operations will typically run at DSR level
    // so a DSR lock has to be used.
    cyg_drv_dsr_lock();
    if (eth->host_up && (address_ok || eth->host_promiscuous)) {
        eth->tx_callback_fn  = callback_fn;
        eth->tx_callback_arg = callback_arg;
        eth->tx_endpoint->buffer        = buf;
        eth->tx_endpoint->buffer_size   = size + 2;
        eth->tx_endpoint->complete_fn   = &usbs_eth_tx_callback;
        eth->tx_endpoint->complete_data = (void*) eth;
        (*(eth->tx_endpoint->start_tx_fn))(eth->tx_endpoint);
    } else {
        // Packets not intended for the host can be discarded quietly.
        // A broken connection needs to be reported.
        (*callback_fn)(eth, callback_arg, eth->host_up ? size : -EPIPE);
    }
    cyg_drv_dsr_unlock();
}
static void
eth_drv_send(struct netif *netif, struct pbuf *p)
{
    struct eth_drv_sg sg_list[MAX_ETH_DRV_SG];
    struct eth_drv_sc *sc = netif->state;
    int sg_len = 0;
    struct pbuf *q;
#ifdef _LOCK_WITH_ROM_MONITOR
    bool need_lock = false;
    int debug_chan;
#endif

    while (!(sc->funs->can_send) (sc));

    for (q = p; q != NULL; q = q->next) {
        sg_list[sg_len].buf = (CYG_ADDRESS) q->payload;
        sg_list[sg_len++].len = q->len;
    }

#ifdef _LOCK_WITH_ROM_MONITOR
    debug_chan = CYGACC_CALL_IF_SET_DEBUG_COMM(CYGNUM_CALL_IF_SET_COMM_ID_QUERY_CURRENT);
    if (debug_chan == RedBoot_TCP_CHANNEL) {
        need_lock = true;
        cyg_drv_dsr_lock();
    }
#endif // _LOCK_WITH_ROM_MONITOR

    (sc->funs->send) (sc, sg_list, sg_len, p->tot_len, (CYG_ADDRWORD) p);

#ifdef _LOCK_WITH_ROM_MONITOR
    // Unlock the driver & hardware. It can once again be safely shared.
    if (need_lock) {
        cyg_drv_dsr_unlock();
    }
#endif // _LOCK_WITH_ROM_MONITOR
}
// Send a character to the device output buffer. // Return 'true' if character is sent to device static bool quicc_sxx_serial_putc(serial_channel *chan, unsigned char c) { quicc_sxx_serial_info *smc_chan = (quicc_sxx_serial_info *)chan->dev_priv; volatile struct cp_bufdesc *txbd, *txfirst; volatile struct smc_uart_pram *pram = (volatile struct smc_uart_pram *)smc_chan->pram; EPPC *eppc = eppc_base(); bool res; cyg_drv_dsr_lock(); // Avoid race condition testing pointers txbd = (struct cp_bufdesc *)((char *)eppc + pram->tbptr); txfirst = txbd; // Scan for a non-busy buffer while (txbd->ctrl & QUICC_BD_CTL_Ready) { // This buffer is busy, move to next one if (txbd->ctrl & QUICC_BD_CTL_Wrap) { txbd = smc_chan->tbase; } else { txbd++; } if (txbd == txfirst) break; // Went all the way around } smc_chan->txbd = txbd; if ((txbd->ctrl & (QUICC_BD_CTL_Ready|QUICC_BD_CTL_Int)) == 0) { // Transmit buffer is not full/busy txbd->buffer[txbd->length++] = c; if (txbd->length == smc_chan->txsize) { // This buffer is now full, tell SMC to start processing it quicc_sxx_serial_flush(smc_chan); } res = true; } else { // No space res = false; } cyg_drv_dsr_unlock(); return res; }
static Cyg_ErrNo serial_set_config(cyg_io_handle_t handle, cyg_uint32 key, const void *xbuf, cyg_uint32 *len) { Cyg_ErrNo res = ENOERR; cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle; serial_channel *chan = (serial_channel *)t->priv; #ifdef CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING cbuf_t *out_cbuf = &chan->out_cbuf; cbuf_t *in_cbuf = &chan->in_cbuf; #endif serial_funs *funs = chan->funs; switch (key) { #ifdef CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING case CYG_IO_SET_CONFIG_READ_BLOCKING: if (*len < sizeof(cyg_uint32) || 0 == in_cbuf->len) { return -EINVAL; } in_cbuf->blocking = (1 == *(cyg_uint32*)xbuf) ? true : false; break; case CYG_IO_SET_CONFIG_WRITE_BLOCKING: if (*len < sizeof(cyg_uint32) || 0 == out_cbuf->len) { return -EINVAL; } out_cbuf->blocking = (1 == *(cyg_uint32*)xbuf) ? true : false; break; #endif // CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING #ifdef CYGPKG_IO_SERIAL_FLOW_CONTROL case CYG_IO_SET_CONFIG_SERIAL_FLOW_CONTROL_METHOD: { cyg_uint32 *f = (cyg_uint32 *)xbuf; if (*len < sizeof(*f)) return -EINVAL; cyg_drv_dsr_lock(); chan->config.flags &= ~(CYGNUM_SERIAL_FLOW_XONXOFF_RX| CYGNUM_SERIAL_FLOW_XONXOFF_TX| CYGNUM_SERIAL_FLOW_RTSCTS_RX| CYGNUM_SERIAL_FLOW_RTSCTS_TX| CYGNUM_SERIAL_FLOW_DSRDTR_RX| CYGNUM_SERIAL_FLOW_DSRDTR_TX); chan->config.flags |= (*f & ( #ifdef CYGOPT_IO_SERIAL_FLOW_CONTROL_SOFTWARE CYGNUM_SERIAL_FLOW_XONXOFF_RX| CYGNUM_SERIAL_FLOW_XONXOFF_TX| #endif #ifdef CYGOPT_IO_SERIAL_FLOW_CONTROL_HW CYGNUM_SERIAL_FLOW_RTSCTS_RX| CYGNUM_SERIAL_FLOW_RTSCTS_TX| CYGNUM_SERIAL_FLOW_DSRDTR_RX| CYGNUM_SERIAL_FLOW_DSRDTR_TX| #endif 0)); #ifdef CYGOPT_IO_SERIAL_FLOW_CONTROL_HW // up to hardware driver to clear flags if rejected res = (funs->set_config)(chan, CYG_IO_SET_CONFIG_SERIAL_HW_FLOW_CONFIG, NULL, NULL); #endif cyg_drv_dsr_unlock(); } break; case CYG_IO_SET_CONFIG_SERIAL_FLOW_CONTROL_FORCE: { cyg_uint32 *f = (cyg_uint32 *)xbuf; if (*len < sizeof(*f)) return -EINVAL; cyg_drv_dsr_lock(); switch (*f) { case CYGNUM_SERIAL_FLOW_THROTTLE_RX: throttle_rx( chan, true ); break; case CYGNUM_SERIAL_FLOW_RESTART_RX: restart_rx( chan, true ); break; case CYGNUM_SERIAL_FLOW_THROTTLE_TX: throttle_tx( chan ); break; case CYGNUM_SERIAL_FLOW_RESTART_TX: restart_tx( chan ); break; default: res = -EINVAL; break; } cyg_drv_dsr_unlock(); } break; #endif // CYGPKG_IO_SERIAL_FLOW_CONTROL #ifdef CYGOPT_IO_SERIAL_SUPPORT_LINE_STATUS case CYG_IO_SET_CONFIG_SERIAL_STATUS_CALLBACK: { cyg_serial_line_status_callback_fn_t newfn; CYG_ADDRWORD newpriv; cyg_serial_line_status_callback_t *tmp = (cyg_serial_line_status_callback_t *)xbuf; if ( *len < sizeof(*tmp) ) return -EINVAL; newfn = tmp->fn; newpriv = tmp->priv; // prevent callbacks while we do this cyg_drv_dsr_lock(); // store old callbacks in same structure tmp->fn = chan->status_callback; tmp->priv = chan->status_callback_priv; chan->status_callback = newfn; chan->status_callback_priv = newpriv; cyg_drv_dsr_unlock(); *len = sizeof(*tmp); } break; #endif default: // pass down to lower layers return (funs->set_config)(chan, key, xbuf, len); } return res; }
static Cyg_ErrNo
serial_get_config(cyg_io_handle_t handle, cyg_uint32 key, void *xbuf,
                  cyg_uint32 *len)
{
    cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle;
    serial_channel *chan = (serial_channel *)t->priv;
    cyg_serial_info_t *buf = (cyg_serial_info_t *)xbuf;
    Cyg_ErrNo res = ENOERR;
    cbuf_t *out_cbuf = &chan->out_cbuf;
    cbuf_t *in_cbuf = &chan->in_cbuf;
    serial_funs *funs = chan->funs;

    switch (key) {
    case CYG_IO_GET_CONFIG_SERIAL_INFO:
        if (*len < sizeof(cyg_serial_info_t)) {
            return -EINVAL;
        }
        *buf = chan->config;
        *len = sizeof(chan->config);
        break;
    case CYG_IO_GET_CONFIG_SERIAL_BUFFER_INFO:
        // return rx/tx buffer sizes and counts
        {
            cyg_serial_buf_info_t *p;
            if (*len < sizeof(cyg_serial_buf_info_t))
                return -EINVAL;
            *len = sizeof(cyg_serial_buf_info_t);
            p = (cyg_serial_buf_info_t *)xbuf;
            p->rx_bufsize = in_cbuf->len;
            if (p->rx_bufsize)
                p->rx_count = in_cbuf->nb;
            else
                p->rx_count = 0;
            p->tx_bufsize = out_cbuf->len;
            if (p->tx_bufsize)
                p->tx_count = out_cbuf->nb;
            else
                p->tx_count = 0;
        }
        break;
    case CYG_IO_GET_CONFIG_SERIAL_OUTPUT_DRAIN:
        // Wait for any pending output to complete
        if (out_cbuf->len == 0) break;        // Nothing to do if not buffered
        cyg_drv_mutex_lock(&out_cbuf->lock);  // Stop any further output processing
        // Note: while the DSR lock is held, DSRs cannot run and so
        // serial_xmt_char() cannot be called; it only gets a chance to run
        // once cyg_drv_cond_wait() suspends this thread and releases the lock.
        cyg_drv_dsr_lock();
        while (out_cbuf->pending || (out_cbuf->nb > 0)) {
            // This wait is woken by serial_xmt_char(), which calls
            // cyg_drv_cond_broadcast() as the buffer drains. If the hardware
            // putc (e.g. w90n740_serial_putc()) always returned true, this
            // loop would never be entered.
            out_cbuf->waiting = true;
            if (!cyg_drv_cond_wait(&out_cbuf->wait))
                res = -EINTR;
        }
        cyg_drv_dsr_unlock();
        cyg_drv_mutex_unlock(&out_cbuf->lock);
        break;
    case CYG_IO_GET_CONFIG_SERIAL_INPUT_FLUSH:
        // Flush any buffered input
        if (in_cbuf->len == 0) break;         // Nothing to do if not buffered
        cyg_drv_mutex_lock(&in_cbuf->lock);   // Stop any further input processing
        cyg_drv_dsr_lock();
        if (in_cbuf->waiting) {
            in_cbuf->abort = true;
            cyg_drv_cond_signal(&in_cbuf->wait);
            in_cbuf->waiting = false;
        }
        in_cbuf->get = in_cbuf->put = in_cbuf->nb = 0;  // Flush buffered input
        cyg_drv_dsr_unlock();
        cyg_drv_mutex_unlock(&in_cbuf->lock);
        break;
    case CYG_IO_GET_CONFIG_SERIAL_ABORT:
        // Abort any outstanding I/O, including blocked reads
        // Caution - assumed to be called from 'timeout' (i.e. DSR) code
        if (in_cbuf->len != 0) {
            in_cbuf->abort = true;
            cyg_drv_cond_signal(&in_cbuf->wait);
        }
        if (out_cbuf->len != 0) {
            out_cbuf->abort = true;
            cyg_drv_cond_signal(&out_cbuf->wait);
        }
        break;
    case CYG_IO_GET_CONFIG_SERIAL_OUTPUT_FLUSH:
        // Throw away any pending output
        if (out_cbuf->len == 0) break;        // Nothing to do if not buffered
        cyg_drv_mutex_lock(&out_cbuf->lock);  // Stop any further output processing
        cyg_drv_dsr_lock();
        if (out_cbuf->nb > 0) {
            out_cbuf->get = out_cbuf->put = out_cbuf->nb = 0;  // Empties queue!
            (funs->stop_xmit)(chan);          // Done with transmit
        }
        if (out_cbuf->waiting) {
            out_cbuf->abort = true;
            cyg_drv_cond_signal(&out_cbuf->wait);
            out_cbuf->waiting = false;
        }
        cyg_drv_dsr_unlock();
        cyg_drv_mutex_unlock(&out_cbuf->lock);
        break;
#ifdef CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING
    case CYG_IO_GET_CONFIG_READ_BLOCKING:
        if (*len < sizeof(cyg_uint32)) {
            return -EINVAL;
        }
        *(cyg_uint32*)xbuf = (in_cbuf->blocking) ? 1 : 0;
        break;
    case CYG_IO_GET_CONFIG_WRITE_BLOCKING:
        if (*len < sizeof(cyg_uint32)) {
            return -EINVAL;
        }
        *(cyg_uint32*)xbuf = (out_cbuf->blocking) ? 1 : 0;
        break;
#endif // CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING
    default:
        res = -EINVAL;
    }
    return res;
}
static Cyg_ErrNo serial_read(cyg_io_handle_t handle, void *_buf, cyg_uint32 *len) { cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle; serial_channel *chan = (serial_channel *)t->priv; serial_funs *funs = chan->funs; cyg_uint8 *buf = (cyg_uint8 *)_buf; cyg_int32 size = 0; cbuf_t *cbuf = &chan->in_cbuf; Cyg_ErrNo res = ENOERR; #ifdef XX_CYGDBG_DIAG_BUF extern int enable_diag_uart; int _enable = enable_diag_uart; int _time, _stime; externC cyg_tick_count_t cyg_current_time(void); #endif // CYGDBG_DIAG_BUF cyg_drv_mutex_lock(&cbuf->lock); cbuf->abort = false; if (cbuf->len == 0) { // Non interrupt driven (i.e. polled) operation while (size++ < *len) { cyg_uint8 c = (funs->getc)(chan); #ifdef CYGOPT_IO_SERIAL_FLOW_CONTROL_SOFTWARE // for software flow control, if the driver returns one of the // characters we act on it and then drop it (the app must not // see it) if ( chan->config.flags & CYGNUM_SERIAL_FLOW_XONXOFF_TX ) { if ( c == CYGDAT_IO_SERIAL_FLOW_CONTROL_XOFF_CHAR ) { throttle_tx( chan ); } else if ( c == CYGDAT_IO_SERIAL_FLOW_CONTROL_XON_CHAR ) { restart_tx( chan ); } else *buf++ = c; } else *buf++ = c; #else *buf++ = c; #endif } } else { cyg_drv_dsr_lock(); // Avoid races while (size < *len) { if (cbuf->nb > 0) { #ifdef CYGPKG_IO_SERIAL_FLOW_CONTROL if ( (cbuf->nb <= cbuf->low_water) && (chan->flow_desc.flags & CYG_SERIAL_FLOW_IN_THROTTLED) ) restart_rx( chan, false ); #endif *buf++ = cbuf->data[cbuf->get]; if (++cbuf->get == cbuf->len) cbuf->get = 0; cbuf->nb--; size++; } else { #ifdef CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING if (!cbuf->blocking) { *len = size; // characters actually read res = -EAGAIN; break; } #endif // CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING cbuf->waiting = true; #ifdef XX_CYGDBG_DIAG_BUF enable_diag_uart = 0; HAL_CLOCK_READ(&_time); _stime = (int)cyg_current_time(); diag_printf("READ wait - get: %d, put: %d, time: %x.%x\n", cbuf->get, cbuf->put, _stime, _time); enable_diag_uart = _enable; #endif // CYGDBG_DIAG_BUF if( !cyg_drv_cond_wait(&cbuf->wait) ) cbuf->abort = true; #ifdef XX_CYGDBG_DIAG_BUF enable_diag_uart = 0; HAL_CLOCK_READ(&_time); _stime = (int)cyg_current_time(); diag_printf("READ continue - get: %d, put: %d, time: %x.%x\n", cbuf->get, cbuf->put, _stime, _time); enable_diag_uart = _enable; #endif // CYGDBG_DIAG_BUF if (cbuf->abort) { // Give up! *len = size; // characters actually read cbuf->abort = false; cbuf->waiting = false; res = -EINTR; break; } } } cyg_drv_dsr_unlock(); } #ifdef XX_CYGDBG_DIAG_BUF cyg_drv_isr_lock(); enable_diag_uart = 0; HAL_CLOCK_READ(&_time); _stime = (int)cyg_current_time(); diag_printf("READ done - size: %d, len: %d, time: %x.%x\n", size, *len, _stime, _time); enable_diag_uart = _enable; cyg_drv_isr_unlock(); #endif // CYGDBG_DIAG_BUF cyg_drv_mutex_unlock(&cbuf->lock); return res; }
static Cyg_ErrNo serial_write(cyg_io_handle_t handle, const void *_buf, cyg_uint32 *len) { cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle; serial_channel *chan = (serial_channel *)t->priv; serial_funs *funs = chan->funs; cyg_int32 size = *len; cyg_uint8 *buf = (cyg_uint8 *)_buf; int next; cbuf_t *cbuf = &chan->out_cbuf; Cyg_ErrNo res = ENOERR; //diag_printf("serial_write cbuf->len = %d\n",cbuf->len); cyg_drv_mutex_lock(&cbuf->lock); cbuf->abort = false; if (cbuf->len == 0) { // Non interrupt driven (i.e. polled) operation while (size-- > 0) { #ifdef CYGPKG_IO_SERIAL_FLOW_CONTROL while ( ( 0 == (chan->flow_desc.flags & CYG_SERIAL_FLOW_OUT_THROTTLED) ) && ((funs->putc)(chan, *buf) == false) ) ; // Ignore full, keep trying #else while ((funs->putc)(chan, *buf) == false) ; // Ignore full, keep trying #endif buf++; } } else { cyg_drv_dsr_lock(); // Avoid race condition testing pointers while (size > 0) { next = cbuf->put + 1; if (next == cbuf->len) next = 0; if (cbuf->nb == cbuf->len) { cbuf->waiting = true; // Buffer full - wait for space #ifdef CYGPKG_IO_SERIAL_FLOW_CONTROL if ( 0 == (chan->flow_desc.flags & CYG_SERIAL_FLOW_OUT_THROTTLED) ) #endif (funs->start_xmit)(chan); // Make sure xmit is running // Check flag: 'start_xmit' may have obviated the need // to wait :-) if (cbuf->waiting) { #ifdef CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING // Optionally return if configured for non-blocking mode. if (!cbuf->blocking) { *len -= size; // number of characters actually sent cbuf->waiting = false; res = -EAGAIN; break; } #endif // CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING cbuf->pending += size; // Have this much more to send [eventually] if( !cyg_drv_cond_wait(&cbuf->wait) ) cbuf->abort = true; cbuf->pending -= size; } if (cbuf->abort) { // Give up! *len -= size; // number of characters actually sent cbuf->abort = false; cbuf->waiting = false; res = -EINTR; break; } } else { cbuf->data[cbuf->put++] = *buf++; cbuf->put = next; cbuf->nb++; size--; // Only count if actually sent! } } #ifdef CYGPKG_IO_SERIAL_FLOW_CONTROL if ( 0 == (chan->flow_desc.flags & CYG_SERIAL_FLOW_OUT_THROTTLED) ) #endif (funs->start_xmit)(chan); // Start output as necessary cyg_drv_dsr_unlock(); } cyg_drv_mutex_unlock(&cbuf->lock); return res; }
static void #else static int #endif sc_lpe_card_handler(cyg_addrword_t param) { struct eth_drv_sc *sc = (struct eth_drv_sc *)param; dp83902a_priv_data_t *dp = (dp83902a_priv_data_t*)sc->driver_private; struct cf_slot *slot; struct cf_cftable cftable; struct cf_config config; int i, len, ptr, cor = 0; unsigned char buf[256], *cp; cyg_uint8* base; unsigned char *vers_product, *vers_manuf, *vers_revision, *vers_date; #ifndef CYGPKG_KERNEL int tries = 0; #endif bool first = true; slot = (struct cf_slot*)dp->plf_priv; cyg_drv_dsr_lock(); while (true) { cyg_drv_dsr_unlock(); // Give DSRs a chance to run (card insertion) cyg_drv_dsr_lock(); if ((slot->state == CF_SLOT_STATE_Inserted) || ((slot->state == CF_SLOT_STATE_Ready) && first)) { first = false; if (slot->state != CF_SLOT_STATE_Ready) { cf_change_state(slot, CF_SLOT_STATE_Ready); } if (slot->state != CF_SLOT_STATE_Ready) { diag_printf("CF card won't go ready!\n"); #ifndef CYGPKG_KERNEL return false; #else continue; #endif } len = sizeof(buf); ptr = 0; if (cf_get_CIS(slot, CF_CISTPL_MANFID, buf, &len, &ptr)) { if (*(short *)&buf[2] != SC_LPE_MANUF) { diag_printf("Not a SC LPE, sorry\n"); continue; } } ptr = 0; if (cf_get_CIS(slot, CF_CISTPL_VERS_1, buf, &len, &ptr)) { // Find individual strings cp = &buf[4]; vers_product = cp; while (*cp++) ; // Skip to nul vers_manuf = cp; while (*cp++) ; // Skip to nul vers_revision = cp; while (*cp++) ; // Skip to nul vers_date = cp; #ifndef CYGPKG_KERNEL if (tries != 0) diag_printf("\n"); diag_printf("%s: %s %s %s\n", vers_manuf, vers_product, vers_revision, vers_date); #endif } ptr = 0; if (cf_get_CIS(slot, CF_CISTPL_CONFIG, buf, &len, &ptr)) { if (cf_parse_config(buf, len, &config)) { cor = config.base; } } if (!cor) { // diag_printf("Couldn't find COR pointer!\n"); continue; } ptr = 0; if (cf_get_CIS(slot, CF_CISTPL_CFTABLE_ENTRY, buf, &len, &ptr)) { if (cf_parse_cftable(buf, len, &cftable)) { cyg_uint8 tmp; // Initialize dp83902a IO details dp->base = base = (cyg_uint8*)&slot->io[cftable.io_space.base[0]]; dp->data = base + DP_DATA; dp->interrupt = slot->int_num; cf_set_COR(slot, cor, cftable.cor); // Reset card (read issues RESET, write clears it) HAL_READ_UINT8(base+DP_CARD_RESET, tmp); HAL_WRITE_UINT8(base+DP_CARD_RESET, tmp); // Wait for card do { DP_IN(base, DP_ISR, tmp); } while (0 == (tmp & DP_ISR_RESET)); // Fetch hardware address from card - terrible, but not well defined // Patterned after what Linux drivers do if (!dp->hardwired_esa) { static unsigned char sc_lpe_addr[] = { 0x00, 0xC0, 0x1B, 0x00, 0x99, 0x9E}; if ((slot->attr[0x1C0] == sc_lpe_addr[0]) && (slot->attr[0x1C2] == sc_lpe_addr[1]) && (slot->attr[0x1C4] == sc_lpe_addr[2])) { sc_lpe_addr[3] = slot->attr[0x1C6]; sc_lpe_addr[4] = slot->attr[0x1C8]; sc_lpe_addr[5] = slot->attr[0x1CA]; } else { // Coudn't find it in the CIS (attribute) data unsigned char prom[32]; // Tell device to give up ESA DP_OUT(base, DP_DCR, 0x48); // Bytewide access DP_OUT(base, DP_RBCH, 0); // Remote byte count DP_OUT(base, DP_RBCL, 0); DP_OUT(base, DP_ISR, 0xFF); // Clear any pending interrupts DP_OUT(base, DP_IMR, 0x00); // Mask all interrupts DP_OUT(base, DP_RCR, 0x20); // Monitor DP_OUT(base, DP_TCR, 0x02); // loopback DP_OUT(base, DP_RBCH, 32); // Remote byte count DP_OUT(base, DP_RBCL, 0); DP_OUT(base, DP_RSAL, 0); // Remote address DP_OUT(base, DP_RSAH, 0); DP_OUT(base, DP_CR, DP_CR_START|DP_CR_RDMA); // Read data for (i = 0; i < 32; i++) { HAL_READ_UINT8(base+DP_DATAPORT, prom[i]); } if ((prom[0] == sc_lpe_addr[0]) && (prom[2] == sc_lpe_addr[1]) && 
(prom[4] == sc_lpe_addr[2])) { diag_printf("Getting address from port\n"); sc_lpe_addr[3] = prom[6]; sc_lpe_addr[4] = prom[8]; sc_lpe_addr[5] = prom[10]; } else { diag_printf("No valid ESA found in CIS! Hardwiring to 00:C0:1B:00:99:9E\n"); } } for (i = 0; i < 6; i++) { dp->esa[i] = sc_lpe_addr[i]; } } // Initialize upper level driver (sc->funs->eth_drv->init)(sc, dp->esa); // Tell system card is ready to talk dp->tab->status = CYG_NETDEVTAB_STATUS_AVAIL; #ifndef CYGPKG_KERNEL cyg_drv_dsr_unlock(); return true; #endif } else { diag_printf("Can't parse CIS\n"); continue; } } else { diag_printf("Can't fetch config info\n"); continue; } } else if (slot->state == CF_SLOT_STATE_Removed) { diag_printf("Compact Flash card removed!\n"); } else { cyg_drv_dsr_unlock(); do_delay(50); // FIXME! #ifndef CYGPKG_KERNEL if (tries == 0) diag_printf("... Waiting for network card: "); diag_printf("."); if (++tries == 10) { // 5 seconds have elapsed - give up return false; } cf_hwr_poll(slot); // Check to see if card has been inserted #endif cyg_drv_dsr_lock(); } } }
static void spi_transaction_do (cyg_spi_device* device, cyg_bool tick_only, cyg_bool polled, cyg_uint32 count, const cyg_uint8* tx_data, cyg_uint8* rx_data, cyg_bool drop_cs) { cyg_spi_freescale_dspi_bus_t* dspi_bus = (cyg_spi_freescale_dspi_bus_t*) device->spi_bus; cyg_spi_freescale_dspi_device_t* dspi_device = (cyg_spi_freescale_dspi_device_t*) device; cyg_bool bus_16bit = dspi_device->clocking.bus_16bit; cyghwr_devs_freescale_dspi_t* dspi_p = dspi_bus->setup_p->dspi_p; cyghwr_hal_freescale_dma_set_t* dma_set_p; cyghwr_hal_freescale_edma_t* edma_p = NULL; cyg_uint32 count_down; cyg_uint32 txfifo_n = dspi_bus->txfifo_n; cyg_uint32 pushr; cyg_uint32 pushque_n; cyg_uint32 dma_chan_rx_i = 0; cyg_uint32 dma_chan_tx_i = 0; cyg_uint8* rx_data0; #if DEBUG_SPI >= 2 cyg_uint32 first_turn = 1; #endif DEBUG2_PRINTF("DSPI: transaction: count=%d drop_cs=%d tick_only=%d\n", count, drop_cs, tick_only); // Set up peripheral CS field. DSPI automatically asserts and deasserts CS pushr = #ifndef CYGOPT_DEVS_SPI_FREESCALE_DSPI_TICK_ONLY_DROPS_CS // Compatibility option // eCos Reference Manual states that CS should drop prior to sending // ticks, but other SPI drivers do not touch the CS. tick_only ? dspi_p->pushr & 0x87FF0000 : #endif dspi_chip_select_set( #ifdef CYGOPT_DEVS_SPI_FREESCALE_DSPI_TICK_ONLY_DROPS_CS // Compatibility option. See comment above. tick_only ? -1 : #endif dspi_device->dev_num, dspi_p->mcr & FREESCALE_DSPI_MCR_PCSSE_M, true); pushr |= FREESCALE_DSPI_PUSHR_CONT_M; dspi_fifo_clear(dspi_p); pushque_n = dspi_bus->pushque_n; if(bus_16bit) txfifo_n *= 2; dma_set_p = dspi_bus->setup_p->dma_set_p; if((count > txfifo_n) && dma_set_p) { rx_data0 = rx_data; edma_p = dma_set_p->edma_p; // Set up the DMA channels. dma_chan_rx_i = SPI_DMA_CHAN_I(dma_set_p, RX); dma_chan_tx_i = SPI_DMA_CHAN_I(dma_set_p, TX); rx_dma_channel_setup(dma_set_p, (cyg_uint8*) rx_data, bus_16bit, &edma_p->tcd[dma_chan_rx_i]); hal_freescale_edma_erq_enable(edma_p, dma_chan_rx_i); dspi_irq_enable(dspi_p, FREESCALE_DSPI_RSER_TFFF_RE_M | FREESCALE_DSPI_RSER_RFDF_RE_M | FREESCALE_DSPI_RSER_TFFF_DIRS_M | FREESCALE_DSPI_RSER_RFDF_DIRS_M); } else { rx_data0 = NULL; // If byte count fits in the FIFO don't bother with DMA. if(dma_set_p) { edma_p = dma_set_p->edma_p; hal_freescale_edma_erq_disable(edma_p, SPI_DMA_CHAN_I(dma_set_p, RX)); } dma_set_p = NULL; dspi_irq_disable(dspi_p, FREESCALE_DSPI_RSER_TFFF_RE_M | FREESCALE_DSPI_RSER_RFDF_RE_M | FREESCALE_DSPI_RSER_TFFF_DIRS_M | FREESCALE_DSPI_RSER_RFDF_DIRS_M); } if(!polled) cyg_drv_interrupt_unmask(dspi_bus->setup_p->intr_num); count_down = count; while(count_down) { #if DEBUG_SPI >= 2 if(first_turn) { if(dspi_bus->pushque_p) dspi_bus->pushque_p[0] |= FREESCALE_DSPI_PUSHR_CTCNT_M; first_turn = 0; } #endif if(dma_set_p && (count_down > txfifo_n)) { // Transfer size is larger than DSPI FIFO // Use DMA Tx count_down = tx_dma_channel_setup(dspi_bus, (cyg_uint8*) tx_data, count_down, bus_16bit, pushr, drop_cs); #if DEBUG_SPI >= 3 hal_freescale_edma_transfer_diag(edma_p, dma_chan_rx_i, true); #endif // Enable the Tx DMA / SPI controller. hal_freescale_edma_erq_enable(edma_p, dma_chan_tx_i); DSPI_EOQ_CLEAR(dspi_p); } else { // Transfer size fits within DSPI FIFO // No need for DMA Tx DSPI_EOQ_CLEAR(dspi_p); count_down = fifo_pushque_fill(dspi_bus, (cyg_uint8*) tx_data, count_down, bus_16bit, pushr, drop_cs); #if DEBUG_SPI >= 3 cyghwr_devs_freescale_dspi_diag(dspi_bus); #endif } if(polled) { DEBUG2_PRINTF("DSPI Polled:\n"); // Busy-wait for DSPI/DMA (polling for completion). 
while(!(dspi_p->sr & FREESCALE_DSPI_SR_EOQF_M)); if(dma_set_p) { // Disable the Tx DMA channel on completion. hal_freescale_edma_erq_disable(edma_p, dma_chan_tx_i); } } else { // Wait for DSPI/DMA completion. (interrupt driven). cyg_drv_mutex_lock(&dspi_bus->transfer_mutex); cyg_drv_dsr_lock(); DSPI_IRQ_ENABLE(dspi_p); DEBUG2_PRINTF("DSPI IRQ: Enabled\n"); // Sit back and wait for the ISR/DSRs to signal completion. cyg_drv_cond_wait (&dspi_bus->transfer_done); cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&dspi_bus->transfer_mutex); } if(dma_set_p) { // Make sure that Rx has been drained by DMA. while((dspi_p->sr & FREESCALE_DSPI_SR_RFDF_M)); DEBUG2_PRINTF("Fifo Drained by DMA 0x%08x\n", dspi_p->sr); if(count_down <= txfifo_n && count_down > 0) { hal_freescale_edma_erq_disable(edma_p, dma_chan_rx_i); dma_set_p = NULL; } } else { // No DMA - "manually" drain Rx FIFO DEBUG2_PRINTF("DSPI FIFO: 'Manually' drain Rx fifo rx_data=%p bus_16bit=%d\n", rx_data, bus_16bit); #if DEBUG_SPI >= 3 cyghwr_devs_freescale_dspi_diag(dspi_bus); #endif if(rx_data) { if(bus_16bit) { cyg_uint16* rx_data16 = (cyg_uint16*) rx_data; while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M) { DEBUG2_PRINTF(" Fifo Pull16 at %p\n", rx_data16); *rx_data16++ = dspi_p->popr; } rx_data = (cyg_uint8*) rx_data16; } else { while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M) { DEBUG2_PRINTF(" Fifo Pull at %p\n", rx_data); *rx_data++ = dspi_p->popr; } } } dspi_fifo_drain(dspi_p); } dspi_fifo_clear(dspi_p); // Prepare for next iteration if(tx_data) { tx_data += pushque_n; if(bus_16bit) tx_data += pushque_n; } } if(rx_data0) { // Rx buffer may be out of sync with cache. DEBUG2_PRINTF("DSPI DMA: Flush cache %p len=%d\n", rx_data0, count); HAL_DCACHE_INVALIDATE(rx_data0, count); DEBUG2_PRINTF("DSPI DMA: Cache flushed\n"); } if(!polled) cyg_drv_interrupt_mask(dspi_bus->setup_p->intr_num); dspi_device->chip_sel = !drop_cs; DEBUG2_PRINTF("cyg_transaction_do() chip_sel = %d drop_cs = %d\n", dspi_device->chip_sel, drop_cs); }
static void spi_transaction_do (cyg_spi_device* device, cyg_bool tick_only, cyg_bool polled, cyg_uint32 count, const cyg_uint8* tx_data, cyg_uint8* rx_data, cyg_bool drop_cs) { cyg_spi_freescale_dspi_bus_t* dspi_bus = (cyg_spi_freescale_dspi_bus_t*) device->spi_bus; cyg_spi_freescale_dspi_device_t* dspi_device = (cyg_spi_freescale_dspi_device_t*) device; cyg_bool bus_16bit = dspi_device->clocking.bus_16bit; cyghwr_devs_freescale_dspi_t* dspi_p = dspi_bus->setup_p->dspi_p; cyghwr_hal_freescale_dma_set_t* dma_set_p; cyghwr_hal_freescale_edma_t* edma_p = NULL; cyg_uint32 count_down; cyg_uint32 txfifo_n = dspi_bus->txfifo_n; cyg_uint32 pushr; cyg_uint32 pushque_n; cyg_uint32 dma_chan_rx_i = 0; cyg_uint32 dma_chan_tx_i = 0; #if DEBUG_SPI >= 2 cyg_uint32 first_turn = 1; #endif DEBUG2_PRINTF("DSPI: transaction: count=%d drop_cs=%d\n", count, drop_cs); // Set up peripheral CS field. DSPI automatically asserts and deasserts CS pushr = dspi_chip_select_set(tick_only ? -1 : dspi_device->dev_num, dspi_p->mcr & FREESCALE_DSPI_MCR_PCSSE_M, true); pushr |= FREESCALE_DSPI_PUSHR_CONT_M; dspi_fifo_clear(dspi_p); dspi_fifo_drain(dspi_p); pushque_n = dspi_bus->pushque_n; if(bus_16bit) txfifo_n *= 2; if((dma_set_p=dspi_bus->setup_p->dma_set_p)) { edma_p = dma_set_p->edma_p; // Set up the DMA channels. dma_chan_rx_i = SPI_DMA_CHAN_I(dma_set_p, RX); dma_chan_tx_i = SPI_DMA_CHAN_I(dma_set_p, TX); rx_dma_channel_setup(dma_set_p, (cyg_uint8*) rx_data, bus_16bit, &edma_p->tcd[dma_chan_rx_i]); hal_freescale_edma_erq_enable(edma_p, dma_chan_rx_i); } if(!polled) cyg_drv_interrupt_unmask(dspi_bus->setup_p->intr_num); count_down = count; while(count_down) { #if DEBUG_SPI >= 2 if(first_turn) { if(dspi_bus->pushque_p) dspi_bus->pushque_p[0] |= FREESCALE_DSPI_PUSHR_CTCNT_M; first_turn = 0; } #endif if(dma_set_p && (count_down > txfifo_n)) { // Transfer size is larger than DSPI FIFO // Use DMA Tx count_down = tx_dma_channel_setup(dspi_bus, (cyg_uint8*) tx_data, count_down, bus_16bit, pushr, drop_cs); #if DEBUG_SPI >= 3 hal_freescale_edma_transfer_diag(edma_p, dma_chan_rx_i, true); #endif // Enable the Tx DMA / SPI controller. hal_freescale_edma_erq_enable(edma_p, dma_chan_tx_i); DSPI_EOQ_CLEAR(dspi_p); } else { // Transfer size fits within DSPI FIFO // No need for DMA Tx DSPI_EOQ_CLEAR(dspi_p); count_down = fifo_pushque_fill(dspi_bus, (cyg_uint8*) tx_data, count_down, bus_16bit, pushr, drop_cs); #if DEBUG_SPI >= 3 cyghwr_devs_freescale_dspi_diag(dspi_bus); #endif } if(polled) { DEBUG2_PRINTF("DSPI Polled:\n"); // Busy-wait for DSPI/DMA (polling for completion). while(!(dspi_p->sr & FREESCALE_DSPI_SR_EOQF_M)); if(dma_set_p) // Disable the Tx DMA channel on completion. hal_freescale_edma_erq_disable(edma_p, dma_chan_tx_i); } else { // Wait for DSPI/DMA completion. (interrupt driven). cyg_drv_mutex_lock(&dspi_bus->transfer_mutex); cyg_drv_dsr_lock(); DSPI_IRQ_ENABLE(dspi_p); DEBUG2_PRINTF("DSPI IRQ: Enabled\n"); // Sit back and wait for the ISR/DSRs to signal completion. cyg_drv_cond_wait (&dspi_bus->transfer_done); cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&dspi_bus->transfer_mutex); } if(dma_set_p) { // Make sure that Rx has been drained by DMA. 
if(rx_data) while((dspi_p->sr & FREESCALE_DSPI_SR_RFDF_M)); } else { // No DMA - "manually" drain Rx FIFO DEBUG2_PRINTF("DSPI FIFO: 'Manually' drain Rx fifo\n"); #if DEBUG_SPI >= 3 cyghwr_devs_freescale_dspi_diag(dspi_bus); #endif if(rx_data) { if(bus_16bit) { cyg_uint16* rx_data16 = (cyg_uint16*) rx_data; while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M) *rx_data16++ = dspi_p->popr; rx_data = (cyg_uint8*) rx_data16; } else { while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M) *rx_data++ = dspi_p->popr; } } else { dspi_fifo_drain(dspi_p); } } dspi_fifo_clear(dspi_p); // Prepare for next iteration if(tx_data) { tx_data += pushque_n; if(bus_16bit) tx_data += pushque_n; } } if(dma_set_p && rx_data) { // Rx buffer may be out of sync with cache. DEBUG2_PRINTF("DSPI DMA: Invalidate cache\n"); HAL_DCACHE_INVALIDATE(rx_data, count); DEBUG2_PRINTF("DSPI DMA: Cache invalidated\n"); } if(!polled) cyg_drv_interrupt_mask(dspi_bus->setup_p->intr_num); dspi_device->chip_sel = !drop_cs; }
//===========================================================================
// Write exactly one CAN message to the CAN bus
//===========================================================================
static Cyg_ErrNo can_write(cyg_io_handle_t handle, const void *_buf, cyg_uint32 *len)
{
    cyg_devtab_entry_t *t    = (cyg_devtab_entry_t *)handle;
    can_channel        *chan = (can_channel *)t->priv;
    can_lowlevel_funs  *funs = chan->funs;
    Cyg_ErrNo           res  = ENOERR;
    can_cbuf_t         *cbuf = &chan->out_cbuf;
    cyg_uint32          size = *len;

    //
    // the caller needs to provide a CAN message buffer
    //
    if (*len != sizeof(cyg_can_message)) {
        return -EINVAL;
    }

    cyg_drv_mutex_lock(&cbuf->lock);
    cbuf->abort = false;
    cyg_drv_dsr_lock();  // avoid race condition while testing pointers

    while (size > 0) {
        if (cbuf->data_cnt == cbuf->len) {
            cbuf->waiting = true;      // Buffer full - wait for space
            funs->start_xmit(chan);    // Make sure xmit is running

            //
            // Check flag: 'start_xmit' may have obviated the need to wait
            //
            if (cbuf->waiting) {
                cbuf->pending += size; // Have this much more to send [eventually]
#if defined(CYGOPT_IO_CAN_SUPPORT_NONBLOCKING)
#if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS)
                //
                // If timeouts are enabled and we use nonblocking calls then we
                // can use the timeout values
                //
                if (!cbuf->blocking) {
                    if (!CYG_DRV_COND_WAIT(&cbuf->wait, cbuf->timeout)) {
                        cbuf->abort = true;
                    }
                } // if (!cbuf->blocking)
                else
#else // #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS)
                //
                // if this is a nonblocking call then we return immediately
                //
                if (!cbuf->blocking) {
                    *len = 0;
                    res = -EAGAIN;
                    break;
                } else
#endif // #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS)
#endif // defined(CYGOPT_IO_CAN_SUPPORT_NONBLOCKING)
                {
                    if (!cyg_drv_cond_wait(&cbuf->wait)) {
                        cbuf->abort = true;
                    }
                }

                cbuf->pending -= size;

                if (cbuf->abort) {
                    // Give up!
                    *len -= size;      // number of characters actually sent
                    cbuf->abort = false;
                    cbuf->waiting = false;
                    res = -EINTR;
                    break;
                } // if (cbuf->abort)
            } // if (cbuf->waiting)
        } // if (cbuf->data_cnt == cbuf->len)
        else {
            //
            // there is enough space left so we can store additional data
            //
            CYG_CAN_MSG_T   *ptxbuf       = (CYG_CAN_MSG_T *)cbuf->pdata;
            CYG_CAN_MSG_T   *pbuf_message = &ptxbuf[cbuf->put];
            cyg_can_message *pmessage     = (cyg_can_message *)_buf;

            CYG_CAN_WRITE_MSG(pbuf_message, pmessage);  // copy message
            cbuf->put = (cbuf->put + 1) % cbuf->len;
            cbuf->data_cnt++;
            size -= sizeof(cyg_can_message);
        }
    } // while (size > 0)

    (funs->start_xmit)(chan);  // Start output as necessary
    cyg_drv_dsr_unlock();
    cyg_drv_mutex_unlock(&cbuf->lock);

    return res;
}
//===========================================================================
// Read one single CAN event from hw
//===========================================================================
static Cyg_ErrNo can_read(cyg_io_handle_t handle, void *_buf, cyg_uint32 *len)
{
    cyg_devtab_entry_t *t    = (cyg_devtab_entry_t *)handle;
    can_channel        *chan = (can_channel *)t->priv;
    can_cbuf_t         *cbuf = &chan->in_cbuf;
    cyg_uint32          size = 0;
    Cyg_ErrNo           res  = ENOERR;

    //
    // the caller needs to provide a CAN event buffer
    //
    if (*len != sizeof(cyg_can_event)) {
        return -EINVAL;
    }

    cyg_drv_mutex_lock(&cbuf->lock);
    cbuf->abort = false;
    cyg_drv_dsr_lock();  // avoid race conditions

    while (size < *len) {
        //
        // if the message buffer contains at least one message then read the
        // oldest message from the buffer and return
        //
        if (cbuf->data_cnt > 0) {
            CYG_CAN_EVENT_T *prxbuf     = (CYG_CAN_EVENT_T *)cbuf->pdata;
            CYG_CAN_EVENT_T *pbuf_event = &prxbuf[cbuf->get];
            cyg_can_event   *pevent     = (cyg_can_event *)_buf;

            CYG_CAN_READ_EVENT(pevent, pbuf_event);  // copy event
            cbuf->get = (cbuf->get + 1) % cbuf->len;
            cbuf->data_cnt--;
            size += sizeof(cyg_can_event);
        } else {
            //
            // if the message buffer does not contain any message, then wait
            // until a message arrives, or return immediately if nonblocking
            // calls are supported
            //
            cbuf->waiting = true;
#if defined(CYGOPT_IO_CAN_SUPPORT_NONBLOCKING)
#if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS)
            //
            // If timeouts are enabled and we use nonblocking calls then we
            // can use the timeout values
            //
            if (!cbuf->blocking) {
                if (!CYG_DRV_COND_WAIT(&cbuf->wait, cbuf->timeout)) {
                    cbuf->abort = true;
                }
            } // if (!cbuf->blocking)
            else
#else // #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS)
            //
            // if this is a nonblocking call then we return immediately
            //
            if (!cbuf->blocking) {
                *len = 0;
                res = -EAGAIN;
                break;
            } else
#endif // #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS)
#endif // #if defined(CYGOPT_IO_CAN_SUPPORT_NONBLOCKING)
            {
                if (!cyg_drv_cond_wait(&cbuf->wait)) {
                    cbuf->abort = true;
                }
            }

            if (cbuf->abort) {
                *len = size;
                cbuf->abort = false;
                cbuf->waiting = false;
                res = -EINTR;
                break;
            }
        }
    } // while (size < *len)

    cyg_drv_dsr_unlock();
    cyg_drv_mutex_unlock(&cbuf->lock);

    return res;
}
static void spi_at91_transfer(cyg_spi_at91_device_t *dev, cyg_uint32 count,
                              const cyg_uint8 *tx_data, cyg_uint8 *rx_data)
{
    cyg_spi_at91_bus_t *spi_bus = (cyg_spi_at91_bus_t *)dev->spi_device.spi_bus;

    // Since PDC transfer buffer counters are 16 bit long,
    // we have to split longer transfers into chunks.
    while (count > 0) {
        cyg_uint16 tr_count = count > 0xFFFF ? 0xFFFF : count;

        // Set rx buf pointer and counter
        if (NULL != rx_data) {
            HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_RPR, (cyg_uint32)rx_data);
            HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_RCR, (cyg_uint32)tr_count);
        }

        // Set tx buf pointer and counter
        HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_TPR, (cyg_uint32)tx_data);
        HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_TCR, (cyg_uint32)tr_count);

        // Enable the SPI int events we are interested in
        HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_IER, AT91_SPI_SR_ENDRX | AT91_SPI_SR_ENDTX);

        cyg_drv_mutex_lock(&spi_bus->transfer_mx);
        {
            spi_bus->transfer_end = false;

            // Unmask the SPI int
            cyg_drv_interrupt_unmask(CYGNUM_HAL_INTERRUPT_SPI);

            // Wait for its completion
            cyg_drv_dsr_lock();
            {
                while (!spi_bus->transfer_end)
                    cyg_drv_cond_wait(&spi_bus->transfer_cond);
            }
            cyg_drv_dsr_unlock();
        }
        cyg_drv_mutex_unlock(&spi_bus->transfer_mx);

        if (NULL == rx_data) {
            cyg_uint32 val;

            // If rx buffer was NULL, then the PDC receiver data transfer
            // was not started and we didn't wait for ENDRX, but only for
            // ENDTX. Meaning that right now the last byte is being serialized
            // over the line and when finished input data will appear in
            // rx data reg. We have to wait for this to happen here, if we
            // don't we'll get the last received byte as the first one in the
            // next transfer!
            // FIXME: is there any better way to do this?
            //        If not, then precalculate this value.
            val = 8000000/dev->cl_brate;
            CYGACC_CALL_IF_DELAY_US(val > 1 ? val : 1);

            // Clear the rx data reg
            HAL_READ_UINT32(AT91_SPI+AT91_SPI_RDR, val);
        }

        // Adjust running variables
        if (NULL != rx_data)
            rx_data += tr_count;
        tx_data += tr_count;
        count   -= tr_count;
    }
}
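// Note on the FIXME delay in spi_at91_transfer(): assuming cl_brate is the
// configured bit rate in bits per second, 8000000/cl_brate is simply one byte
// time (8 bits) expressed in microseconds, clamped to a minimum of 1 us, so
// that the last byte has been fully shifted in before the RDR is read and
// discarded.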
cyg_uint32 zynq_i2c_rx(const cyg_i2c_device* dev, cyg_bool send_start, cyg_uint8* rx_data, cyg_uint32 count, cyg_bool send_nack, cyg_bool send_stop) { //get driver private data cyg_zynq_i2c_extra* extra = (cyg_zynq_i2c_extra*)dev->i2c_bus->i2c_extra; cyg_uint16 ctrl_reg; cyg_uint8 bytes_to_send; cyg_uint16 isr_status; #ifdef ZYNQ_I2C_DEBUG DPRINTF("RX start\n"); #endif extra->i2c_addr = dev->i2c_address; extra->i2c_bytes_left = count; extra->i2c_rx_buf = rx_data; if(send_start) { HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); // Clear all except div fields ctrl_reg &= 0xff00; // clear FIFO, master receive mode ctrl_reg |= ((1 << CR_RW) | (1 << CR_CLR_FIFO) | (1 << CR_ACKEN) | (1 << CR_NEA) | (1 << CR_MS)); if(extra->i2c_bytes_left > XI2CPS_FIFO_DEPTH || !send_stop || (extra->i2c_flag & I2C_FLAG_ACT)) { ctrl_reg |= (1 << CR_HOLD); extra->i2c_hold_flag = 1; } // Write config bits HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); // write number of data to receive from slave if(extra->i2c_bytes_left > XI2CPS_FIFO_DEPTH) HAL_WRITE_UINT8(XI2CPS_BASE + XI2CPS_XFER_SIZE_OFFSET, XI2CPS_FIFO_DEPTH + 1); else HAL_WRITE_UINT8(XI2CPS_BASE + XI2CPS_XFER_SIZE_OFFSET, extra->i2c_bytes_left); // Write Slave address - generate start condition, tx start HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_ADDR_OFFSET, (dev->i2c_address & XI2CPS_ADDR_MASK)); } else //transmission in progress { // write number of data to receive from slave if(extra->i2c_bytes_left > XI2CPS_FIFO_DEPTH) HAL_WRITE_UINT8(XI2CPS_BASE + XI2CPS_XFER_SIZE_OFFSET, XI2CPS_FIFO_DEPTH + 1); else HAL_WRITE_UINT8(XI2CPS_BASE + XI2CPS_XFER_SIZE_OFFSET, extra->i2c_bytes_left); } #ifdef ZYNQ_I2C_DEBUG DPRINTF("isr starting\n"); #endif cyg_drv_mutex_lock(&extra->i2c_lock); cyg_drv_dsr_lock(); cyg_drv_interrupt_unmask(extra->i2c_isr_vector); #ifdef ZYNQ_I2C_DEBUG DPRINTF("waiting for data reception...\n"); #endif while(!(extra->i2c_flag & (I2C_FLAG_FINISH | I2C_FLAG_ERROR))) { cyg_drv_cond_wait(&extra->i2c_wait); } #ifdef ZYNQ_I2C_DEBUG DPRINTF("data received!?\n"); #endif cyg_drv_interrupt_mask(extra->i2c_isr_vector); cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&extra->i2c_lock); if(extra->i2c_flag & I2C_FLAG_ERROR) { #ifdef ZYNQ_I2C_DEBUG DPRINTF("RX error extra->i2c_flag = "); diag_printf("%x\n", extra->i2c_flag); #endif extra->i2c_flag = 0; //TODO: condition for hold_flag? HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); ctrl_reg &= ~(1 << CR_HOLD); HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); } else { if(send_stop) { //TODO: condition for hold_flag? HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); ctrl_reg &= ~(1 << CR_HOLD); HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); extra->i2c_flag = 0; } else { extra->i2c_flag = I2C_FLAG_ACT; } } count -= extra->i2c_bytes_left; extra->i2c_addr = 0; extra->i2c_bytes_left = 0; extra->i2c_rx_buf = NULL; #ifdef ZYNQ_I2C_DEBUG DPRINTF("rx finished\n") #endif return count; }
/** * * QSPI bus transfer with IRQ function. * * @param qspi_bus - QSPI bus handle * @param count - Number of bytes to transmit. * @param tx_data - Pointer to TX buffer. * @param rx_data - Pointer to RX buffer. * * @return none * *****************************************************************************/ static void qspi_xc7z_transfer(cyg_qspi_xc7z_device_t *dev, cyg_uint32 count, const cyg_uint8 *tx_data, cyg_uint8 *rx_data) { entry_debug(); cyg_qspi_xc7z_bus_t *qspi_bus = (cyg_qspi_xc7z_bus_t *)dev->qspi_device.spi_bus; cyg_uint32 val; // Enable device HAL_WRITE_UINT32(qspi_bus->base + XQSPIPS_ER_OFFSET, XQSPIPS_ER_ENABLE_MASK); // Enable manual start HAL_READ_UINT32(qspi_bus->base + XQSPIPS_CR_OFFSET, val); val |= XQSPIPS_CR_MANSTRTEN_MASK | XQSPIPS_CR_SSFORCE_MASK; HAL_WRITE_UINT32(qspi_bus->base + XQSPIPS_CR_OFFSET, val); // Set tx buf pointer and counter if (NULL != tx_data) HAL_DCACHE_STORE(tx_data, count); // Set rx buf pointer and counter if (NULL != rx_data) HAL_DCACHE_FLUSH(rx_data, count); // Send first instruction if(qspi_bus->uc_tx_instr == 0) qspi_xc7z_send_instruction(qspi_bus); { if ((qspi_bus->us_tx_bytes) && ((qspi_bus->uc_tx_instr != XQSPIPS_FLASH_OPCODE_FAST_READ) || (qspi_bus->uc_tx_instr != XQSPIPS_FLASH_OPCODE_DUAL_READ) || (qspi_bus->uc_tx_instr != XQSPIPS_FLASH_OPCODE_QUAD_READ) )) qspi_xc7z_fill_tx_fifo(qspi_bus,count); // Enable the QSPI int events we are interested in HAL_WRITE_UINT32(qspi_bus->base + XQSPIPS_IER_OFFSET, XQSPIPS_IXR_TXOW_MASK | XQSPIPS_IXR_MODF_MASK); HAL_READ_UINT32(qspi_bus->base + XQSPIPS_CR_OFFSET, val); val |= XQSPIPS_CR_MANSTRT_MASK; HAL_WRITE_UINT32(qspi_bus->base + XQSPIPS_CR_OFFSET, val); cyg_drv_mutex_lock(&qspi_bus->transfer_mx); { qspi_bus->transfer_end = false; // Unmask the SPI int cyg_drv_interrupt_unmask(qspi_bus->interrupt_number); // Wait for its completion cyg_drv_dsr_lock(); { while (!qspi_bus->transfer_end) cyg_drv_cond_wait(&qspi_bus->transfer_cond); } cyg_drv_dsr_unlock(); } cyg_drv_mutex_unlock(&qspi_bus->transfer_mx); } }
//========================================================================== // receive into a buffer from a device //========================================================================== cyg_uint32 cyg_lpc2xxx_i2c_rx(const cyg_i2c_device *dev, cyg_bool send_start, cyg_uint8 *rx_data, cyg_uint32 count, cyg_bool send_nak, cyg_bool send_stop) { cyg_lpc2xxx_i2c_extra* extra = (cyg_lpc2xxx_i2c_extra*)dev->i2c_bus->i2c_extra; extra->i2c_addr = (dev->i2c_address << 1) | 0x01; extra->i2c_count = count; extra->i2c_rxbuf = rx_data; extra->i2c_rxnak = send_nak; // // for a repeated start the SI bit has to be reset // if we continue a previous transfer, start reception // if(send_start) { SET_CON(extra, CON_STA); if (I2C_FLAG_ACT == extra->i2c_flag) { CLR_CON(extra, CON_SI); } } extra->i2c_flag = 0; // // the isr will do most of the work, and the dsr will signal when an // error occurred or the transfer finished // cyg_drv_mutex_lock(&extra->i2c_lock); cyg_drv_dsr_lock(); cyg_drv_interrupt_unmask(I2C_ISRVEC(extra)); while(!(extra->i2c_flag & (I2C_FLAG_FINISH | I2C_FLAG_ERROR))) { cyg_drv_cond_wait(&extra->i2c_wait); } cyg_drv_interrupt_mask(I2C_ISRVEC(extra)); cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&extra->i2c_lock); // too bad we have no way to tell the caller if (extra->i2c_flag & I2C_FLAG_ERROR) { diag_printf("I2C RX error flag: %x\n", extra->i2c_flag); extra->i2c_flag = 0; } else { if(send_stop) { SET_CON(extra, CON_STO); CLR_CON(extra, CON_SI | CON_STA); extra->i2c_flag = 0; } else { extra->i2c_flag = I2C_FLAG_ACT; } } count -= extra->i2c_count; extra->i2c_addr = 0; extra->i2c_count = 0; extra->i2c_rxbuf = NULL; return count; }
static Cyg_ErrNo
disk_bwrite(cyg_io_handle_t  handle,
            const void      *buf,
            cyg_uint32      *len,   // In blocks
            cyg_uint32       pos)   // In blocks
{
    cyg_devtab_entry_t *t    = (cyg_devtab_entry_t *) handle;
    disk_channel       *chan = (disk_channel *) t->priv;
    disk_controller    *ctlr = chan->controller;
    disk_funs          *funs = chan->funs;
    cyg_disk_info_t    *info = chan->info;
    cyg_uint32          size = *len;
    cyg_uint8          *bbuf = (cyg_uint8 * const) buf;
    Cyg_ErrNo           res  = ENOERR;
    cyg_uint32          last;

    cyg_drv_mutex_lock( &ctlr->lock );

    while( ctlr->busy )
        cyg_drv_cond_wait( &ctlr->queue );

    if (info->connected && chan->valid)
    {
        ctlr->busy = true;

        if (NULL != chan->partition)
        {
            pos += chan->partition->start;
            last = chan->partition->end;
        }
        else
        {
            last = info->blocks_num-1;
        }

        D(("disk write block=%d len=%d buf=%p\n", pos, *len, buf));

        while( size > 0 )
        {
            cyg_uint32 tfr = size;

            if (pos > last)
            {
                res = -EIO;
                goto done;
            }

            if( tfr > info->ident.max_transfer )
                tfr = info->ident.max_transfer;

            ctlr->result = -EWOULDBLOCK;

            cyg_drv_dsr_lock();

            res = (funs->write)(chan, (void*)bbuf, tfr, pos);

            if( res == -EWOULDBLOCK )
            {
                // If the driver replies EWOULDBLOCK, then the transfer is
                // being handled asynchronously and when it is finished it
                // will call disk_transfer_done(). This will wake us up here
                // to continue.

                while( ctlr->result == -EWOULDBLOCK )
                    cyg_drv_cond_wait( &ctlr->async );

                res = ctlr->result;
            }

            cyg_drv_dsr_unlock();

            if (ENOERR != res)
                goto done;

            if (!info->connected)
            {
                res = -EINVAL;
                goto done;
            }

            bbuf += tfr * info->block_size;
            pos  += tfr;
            size -= tfr;
        }

        ctlr->busy = false;
        cyg_drv_cond_signal( &ctlr->queue );
    }
    else
        res = -EINVAL;

done:
    cyg_drv_mutex_unlock( &ctlr->lock );

#ifdef CYGPKG_KERNEL
    cyg_thread_yield();
#endif

    *len -= size;
    return res;
}
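//===========================================================================
// The comment in disk_bwrite() describes the asynchronous completion
// protocol: a driver that returns -EWOULDBLOCK finishes the transfer later
// and reports the result via disk_transfer_done(), which wakes the waiter on
// ctlr->async. A sketch of what such a completion routine might look like
// follows; the field names match disk_bwrite() above, but the exact
// signature in the generic disk layer may differ.
//===========================================================================
static void
example_disk_transfer_done(disk_channel *chan, Cyg_ErrNo res)
{
    disk_controller *ctlr = chan->controller;

    // Expected to run at DSR level (or with the DSR lock held), matching the
    // cyg_drv_dsr_lock() region that disk_bwrite() waits in.
    ctlr->result = res;
    cyg_drv_cond_signal( &ctlr->async );
}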
static void usbs_at91_endpoint_start (usbs_rx_endpoint * pep) { int epn = usbs_at91_pep_to_number(pep); cyg_addrword_t pCSR = pCSRn(epn); cyg_addrword_t pFDR = pFDRn(epn); cyg_uint16 space = 0; cyg_uint16 endpoint_size = usbs_at91_endpoint_fifo_size[epn]; cyg_uint8 **ppbegin = &usbs_at91_endpoint_pbegin[epn]; cyg_uint8 **ppend = &usbs_at91_endpoint_pend[epn]; CYG_ASSERT (pep->complete_fn, "No complete_fn()"); cyg_drv_dsr_lock (); if (usbs_at91_ep0.state != USBS_STATE_CONFIGURED) { /* If not configured it means there is nothing to do */ cyg_drv_dsr_unlock (); if (pep->complete_fn) { (*pep->complete_fn) (pep->complete_data, -EPIPE); } return; } if (pep->halted) { /* Halted means nothing to do */ cyg_drv_dsr_unlock (); if (pep->complete_fn) { (*pep->complete_fn) (pep->complete_data, -EAGAIN); } return; } if (BITS_ARE_SET (pIMR, 1 << epn)) { cyg_drv_dsr_unlock (); if (pep->complete_fn) { (*pep->complete_fn) (pep->complete_data, -EIO); } return; } CYG_ASSERT (BITS_ARE_SET (pCSR, 1 << 9), "Wrong endpoint type"); *ppbegin = pep->buffer; /* Set the working pointers */ *ppend = (cyg_uint8 *) ((cyg_uint32) pep->buffer + pep->buffer_size); if (BITS_ARE_SET (pCSR, 0x400)) { /* IN: tx_endpoint */ space = (cyg_uint32) * ppend - (cyg_uint32) * ppbegin; if (space == endpoint_size) { *ppend = *ppbegin; /* Send zero-packet */ } *ppbegin = write_fifo_uint8 (pFDR, *ppbegin, (cyg_uint8 *) ((cyg_uint32) * ppbegin + MIN (space, endpoint_size))); SET_BITS (pCSR, AT91_UDP_CSR_TXPKTRDY); if (*ppend == *ppbegin) { /* Last packet ? */ *ppend = *ppbegin - 1; /* The packet hasn't been sent yet */ } } usbs_at91_endpoint_interrupt_enable (epn, true); cyg_drv_dsr_unlock (); }
/* * dev - device structure (see: io/i2c/i2c.h) * send_start - true if we must send start condition (only on first part of data) * tx_data - no comments * count - number of data in tx_data * send_stop - true if there will be no more data to send at this session, we must send stop */ cyg_uint32 zynq_i2c_tx(const cyg_i2c_device* dev, cyg_bool send_start, const cyg_uint8* tx_data, cyg_uint32 count, cyg_bool send_stop) { //get driver private data cyg_zynq_i2c_extra* extra = (cyg_zynq_i2c_extra*)dev->i2c_bus->i2c_extra; cyg_uint16 ctrl_reg; cyg_uint8 bytes_to_send; cyg_uint16 isr_value; extra->i2c_addr = dev->i2c_address; extra->i2c_bytes_left = count; extra->i2c_tx_buf = tx_data; #ifdef ZYNQ_I2C_DEBUG DPRINTF("tx start\n"); #endif if(send_start) //init transmission { HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); // Clear all except div fields ctrl_reg &= 0xff00; // clear FIFO, master transmit mode ctrl_reg |= ((1 << CR_CLR_FIFO) | (1 << CR_ACKEN) | (1 << CR_NEA) | (1 << CR_MS)); // if there are more data to send than fifo length // or we don't want to send stop // or we continue proevious transmission: set HOLD if(extra->i2c_bytes_left > XI2CPS_FIFO_DEPTH || !send_stop || (extra->i2c_flag & I2C_FLAG_ACT)) { ctrl_reg |= (1 << CR_HOLD); extra->i2c_hold_flag = 1; } else { extra->i2c_hold_flag = 0; } // Write config bits HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); // Feed data to FIFO zynq_i2c_feed_data(extra); // Write Slave address - generate start condition, tx start HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_ADDR_OFFSET, (dev->i2c_address & XI2CPS_ADDR_MASK)); } else //transmission in progress { // Feed data to FIFO zynq_i2c_feed_data(extra); } #ifdef ZYNQ_I2C_DEBUG DPRINTF("isr starting\n"); #endif //TODO: mutex_lock on bus level performed in io/i2c.cxx //TODO: line below to remove? cyg_drv_mutex_lock(&extra->i2c_lock); cyg_drv_dsr_lock(); cyg_drv_interrupt_unmask(extra->i2c_isr_vector); #ifdef ZYNQ_I2C_DEBUG DPRINTF("waiting for data transmission...\n"); #endif while(!(extra->i2c_flag & (I2C_FLAG_FINISH | I2C_FLAG_ERROR))) { #ifdef ZYNQ_I2C_DEBUG HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, isr_value); diag_printf("CR: %x\n", isr_value); HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_SR_OFFSET, isr_value); diag_printf("SR: %x\n", isr_value); HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_ADDR_OFFSET, isr_value); diag_printf("ADDR: %x\n", isr_value); HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_ISR_OFFSET, isr_value); diag_printf("ISR: %x\n", isr_value); HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_IMR_OFFSET, isr_value); diag_printf("IMR: %x\n", isr_value); #endif cyg_drv_cond_wait(&extra->i2c_wait); } #ifdef ZYNQ_I2C_DEBUG DPRINTF("data transmitted!?\n"); #endif cyg_drv_interrupt_mask(extra->i2c_isr_vector); cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&extra->i2c_lock); if(extra->i2c_flag & I2C_FLAG_ERROR) { #ifdef ZYNQ_I2C_DEBUG DPRINTF("TX error extra->i2c_flag = "); diag_printf("%x\n", extra->i2c_flag); #endif extra->i2c_flag = 0; //TODO: condition for hold_flag? HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); ctrl_reg &= ~(1 << CR_HOLD); HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); } else { if(send_stop) { //TODO: condition for hold_flag? 
HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); ctrl_reg &= ~(1 << CR_HOLD); HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg); extra->i2c_flag = 0; } else { extra->i2c_flag = I2C_FLAG_ACT; } } count -= extra->i2c_bytes_left; extra->i2c_addr = 0; extra->i2c_bytes_left = 0; extra->i2c_tx_buf = NULL; #ifdef ZYNQ_I2C_DEBUG DPRINTF("tx finished\n"); #endif return count; }
//===========================================================================
// Set CAN channel configuration
//===========================================================================
static Cyg_ErrNo can_set_config(cyg_io_handle_t handle, cyg_uint32 key,
                                const void *xbuf, cyg_uint32 *len)
{
    cyg_devtab_entry_t *t        = (cyg_devtab_entry_t *)handle;
    can_channel        *chan     = (can_channel *)t->priv;
    Cyg_ErrNo           res      = ENOERR;
    can_lowlevel_funs  *funs     = chan->funs;
    can_cbuf_t         *out_cbuf = &chan->out_cbuf;
    can_cbuf_t         *in_cbuf  = &chan->in_cbuf;

    switch (key) {
#ifdef CYGOPT_IO_CAN_SUPPORT_NONBLOCKING
        //
        // Set calls to the read function to blocking / nonblocking mode
        //
        case CYG_IO_SET_CONFIG_READ_BLOCKING:
            {
                if (*len < sizeof(cyg_uint32) || 0 == in_cbuf->len) {
                    return -EINVAL;
                }
                in_cbuf->blocking = (1 == *(cyg_uint32*)xbuf) ? true : false;
            }
            break;

        //
        // Set calls to the write function to blocking / nonblocking mode
        //
        case CYG_IO_SET_CONFIG_WRITE_BLOCKING:
            {
                if (*len < sizeof(cyg_uint32) || 0 == out_cbuf->len) {
                    return -EINVAL;
                }
                out_cbuf->blocking = (1 == *(cyg_uint32*)xbuf) ? true : false;
            }
            break;
#endif // CYGOPT_IO_CAN_SUPPORT_NONBLOCKING

#ifdef CYGOPT_IO_CAN_SUPPORT_TIMEOUTS
        //
        // Set new rx/tx timeouts
        //
        case CYG_IO_SET_CONFIG_CAN_TIMEOUT :
            {
                cyg_can_timeout_info_t *ptimeout_info;

                if (*len < sizeof(cyg_can_timeout_info_t)) {
                    return -EINVAL;
                }

                *len = sizeof(cyg_can_timeout_info_t);
                ptimeout_info = (cyg_can_timeout_info_t *)xbuf;

                in_cbuf->timeout  = ptimeout_info->rx_timeout;
                out_cbuf->timeout = ptimeout_info->tx_timeout;
            }
            break; // case CYG_IO_SET_CONFIG_CAN_TIMEOUT
#endif // CYGOPT_IO_CAN_SUPPORT_TIMEOUTS

        //
        // Flush any buffered input
        //
        case CYG_IO_SET_CONFIG_CAN_INPUT_FLUSH:
            {
                if (in_cbuf->len == 0) {
                    break; // Nothing to do if not buffered
                }
                cyg_drv_mutex_lock(&in_cbuf->lock);  // Stop any further input processing
                cyg_drv_dsr_lock();

                if (in_cbuf->waiting) {
                    in_cbuf->abort = true;
                    cyg_drv_cond_broadcast(&in_cbuf->wait);
                    in_cbuf->waiting = false;
                }
                in_cbuf->get = in_cbuf->put = in_cbuf->data_cnt = 0; // Flush buffered input

                //
                // Pass to the hardware driver in case it wants to flush FIFOs etc.
                //
                (funs->set_config)(chan, CYG_IO_SET_CONFIG_CAN_INPUT_FLUSH,
                                   NULL, NULL);

                cyg_drv_dsr_unlock();
                cyg_drv_mutex_unlock(&in_cbuf->lock);
            }
            break; // CYG_IO_SET_CONFIG_CAN_INPUT_FLUSH:

        //
        // Flush any buffered output
        //
        case CYG_IO_SET_CONFIG_CAN_OUTPUT_FLUSH:
            {
                // Throw away any pending output
                if (out_cbuf->len == 0) {
                    break; // Nothing to do if not buffered
                }
                cyg_drv_mutex_lock(&out_cbuf->lock); // Stop any further output processing
                cyg_drv_dsr_lock();

                if (out_cbuf->data_cnt > 0) {
                    out_cbuf->get = out_cbuf->put = out_cbuf->data_cnt = 0; // Empties queue!
                    (funs->stop_xmit)(chan);  // Done with transmit
                }

                //
                // Pass to the hardware driver in case it wants to flush FIFOs etc.
                //
                (funs->set_config)(chan, CYG_IO_SET_CONFIG_CAN_OUTPUT_FLUSH,
                                   NULL, NULL);

                if (out_cbuf->waiting) {
                    out_cbuf->abort = true;
                    cyg_drv_cond_broadcast(&out_cbuf->wait);
                    out_cbuf->waiting = false;
                } // if (out_cbuf->waiting)

                cyg_drv_dsr_unlock();
                cyg_drv_mutex_unlock(&out_cbuf->lock);
            }
            break; // CYG_IO_SET_CONFIG_CAN_OUTPUT_FLUSH:

        //
        // Wait until all messages in the output buffer are sent
        //
        case CYG_IO_GET_CONFIG_SERIAL_OUTPUT_DRAIN:
            {
                // Wait for any pending output to complete
                if (out_cbuf->len == 0) {
                    break; // Nothing to do if not buffered
                }
                cyg_drv_mutex_lock(&out_cbuf->lock); // Stop any further output processing
                cyg_drv_dsr_lock();

                while (out_cbuf->pending || (out_cbuf->data_cnt > 0)) {
                    out_cbuf->waiting = true;
                    if (!cyg_drv_cond_wait(&out_cbuf->wait)) {
                        res = -EINTR;
                    }
                }

                cyg_drv_dsr_unlock();
                cyg_drv_mutex_unlock(&out_cbuf->lock);
            }
            break; // CYG_IO_GET_CONFIG_SERIAL_OUTPUT_DRAIN:

        //
        // Abort any outstanding I/O, including blocked reads
        // Caution - assumed to be called from 'timeout' (i.e. DSR) code
        //
        case CYG_IO_SET_CONFIG_CAN_ABORT :
            {
                in_cbuf->abort = true;
                cyg_drv_cond_broadcast(&in_cbuf->wait);

                out_cbuf->abort = true;
                cyg_drv_cond_broadcast(&out_cbuf->wait);
            }
            break;

#ifdef CYGOPT_IO_CAN_SUPPORT_CALLBACK
        //
        // Set callback configuration
        // To disable the callback set flag_mask = 0
        //
        case CYG_IO_SET_CONFIG_CAN_CALLBACK:
            {
                if (*len != sizeof(cyg_can_callback_cfg)) {
                    return -EINVAL;
                }

                // Copy data under DSR locking
                cyg_drv_dsr_lock();
                chan->callback_cfg = *((cyg_can_callback_cfg*) xbuf);
                cyg_drv_dsr_unlock();
            }
            break;
#endif // CYGOPT_IO_CAN_SUPPORT_CALLBACK

        default:
            //
            // pass down to lower layers
            //
            res = (funs->set_config)(chan, key, xbuf, len);
    } // switch (key)

    return res;
}
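//===========================================================================
// For completeness, configuration keys like the ones handled above are
// reached from application code through the generic eCos I/O API. A minimal
// usage sketch follows; the device name "/dev/can0" and the header names are
// assumptions that depend on the target's package configuration.
//===========================================================================
#include <cyg/io/io.h>
#include <cyg/io/canio.h>

static void flush_can_input(void)
{
    cyg_io_handle_t hCAN;
    cyg_uint32      len = 0;   // no argument data is needed for this key

    if (ENOERR == cyg_io_lookup("/dev/can0", &hCAN)) {
        cyg_io_set_config(hCAN, CYG_IO_SET_CONFIG_CAN_INPUT_FLUSH, NULL, &len);
    }
}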