// Apply a configuration change (key/value) to a disk device.
// Serializes on the controller, tracks mount/umount reference counts on
// both the channel and the whole disk, then forwards the key to the
// hardware driver. Returns -EINVAL if the disk is gone or channel invalid.
static Cyg_ErrNo disk_set_config(cyg_io_handle_t handle, cyg_uint32 key, const void *xbuf, cyg_uint32 *len)
{
    cyg_devtab_entry_t *devtab     = (cyg_devtab_entry_t *) handle;
    disk_channel       *channel    = (disk_channel *) devtab->priv;
    disk_controller    *controller = channel->controller;
    cyg_disk_info_t    *disk_info  = channel->info;
    disk_funs          *lowlevel   = channel->funs;
    Cyg_ErrNo           err        = ENOERR;

    cyg_drv_mutex_lock( &controller->lock );

    // Wait until no other thread owns the controller.
    while( controller->busy )
        cyg_drv_cond_wait( &controller->queue );

    if (!disk_info->connected || !channel->valid) {
        err = -EINVAL;
    } else {
        controller->busy = true;

        D(("disk set config key=%d\n", key));

        if (CYG_IO_SET_CONFIG_DISK_MOUNT == key) {
            channel->mounts++;
            disk_info->mounts++;
            D(("disk mount: chan %d disk %d\n",channel->mounts, disk_info->mounts));
        } else if (CYG_IO_SET_CONFIG_DISK_UMOUNT == key) {
            channel->mounts--;
            disk_info->mounts--;
            D(("disk umount: chan %d disk %d\n",channel->mounts, disk_info->mounts));
        }
        // All other keys: no bookkeeping at this layer.

        // Always give the hardware driver a chance to act on the key too.
        err = (lowlevel->set_config)(channel, key, xbuf, len);

        controller->busy = false;
        cyg_drv_cond_signal( &controller->queue );
    }

    cyg_drv_mutex_unlock( &controller->lock );
    return err;
}
// Query disk configuration. CYG_IO_GET_CONFIG_DISK_INFO copies the cached
// cyg_disk_info_t into the caller's buffer (after checking it is large
// enough); every other key is delegated to the hardware driver.
static Cyg_ErrNo disk_get_config(cyg_io_handle_t handle, cyg_uint32 key, void *xbuf, cyg_uint32 *len)
{
    cyg_devtab_entry_t *devtab     = (cyg_devtab_entry_t *) handle;
    disk_channel       *channel    = (disk_channel *) devtab->priv;
    disk_controller    *controller = channel->controller;
    cyg_disk_info_t    *disk_info  = channel->info;
    cyg_disk_info_t    *out        = (cyg_disk_info_t *) xbuf;
    disk_funs          *lowlevel   = channel->funs;
    Cyg_ErrNo           err        = ENOERR;

    cyg_drv_mutex_lock( &controller->lock );

    // Wait until no other thread owns the controller.
    while( controller->busy )
        cyg_drv_cond_wait( &controller->queue );

    if (!disk_info->connected || !channel->valid) {
        err = -EINVAL;
    } else {
        controller->busy = true;

        D(("disk get config key=%d\n", key));

        if (CYG_IO_GET_CONFIG_DISK_INFO == key) {
            if (*len < sizeof(cyg_disk_info_t)) {
                // Caller's buffer cannot hold the structure.
                err = -EINVAL;
            } else {
                D(("chan->info->block_size %u\n", channel->info->block_size ));
                D(("chan->info->blocks_num %u\n", channel->info->blocks_num ));
                D(("chan->info->phys_block_size %u\n", channel->info->phys_block_size ));
                *out = *channel->info;
                *len = sizeof(cyg_disk_info_t);
            }
        } else {
            // Unknown key: pass down to lower layers.
            err = (lowlevel->get_config)(channel, key, xbuf, len);
        }

        controller->busy = false;
        cyg_drv_cond_signal( &controller->queue );
    }

    cyg_drv_mutex_unlock( &controller->lock );
    return err;
}
// Blocking devtab write entry point for a USB transmit endpoint.
// Starts a transfer on the endpoint and sleeps until the endpoint's
// completion callback fires, then reports bytes-sent (or error) back
// through *size / the return value.
//
// handle - devtab entry whose priv field is a usbs_tx_endpoint
// buf    - data to transmit (cast to non-const; hardware only reads it)
// size   - in: bytes to send; out: bytes actually sent on success
// Returns ENOERR on success, or the negative error from the endpoint.
Cyg_ErrNo usbs_devtab_cwrite(cyg_io_handle_t handle, const void* buf, cyg_uint32* size)
{
    // Per-call completion rendezvous, shared with usbs_devtab_callback()
    // via endpoint->complete_data. Lives on this stack frame only.
    usbs_callback_data wait;
    cyg_devtab_entry_t* devtab_entry;
    usbs_tx_endpoint* endpoint;
    int result = ENOERR;
    CYG_REPORT_FUNCTION();

    wait.completed = 0;
    cyg_drv_mutex_init(&wait.lock);
    cyg_drv_cond_init(&wait.signal, &wait.lock);

    devtab_entry = (cyg_devtab_entry_t*) handle;
    CYG_CHECK_DATA_PTR( devtab_entry, "A valid endpoint must be supplied");
    endpoint = (usbs_tx_endpoint*) devtab_entry->priv;
    CYG_CHECK_DATA_PTR( endpoint, "The handle must correspond to a USB endpoint");
    CYG_CHECK_FUNC_PTR( endpoint->start_tx_fn, "The endpoint must have a start_tx function");

    // Arm the endpoint and kick off the hardware transfer. The completion
    // callback may run (from DSR context) at any point after this call.
    endpoint->buffer = (unsigned char*) buf;
    endpoint->buffer_size = (int) *size;
    endpoint->complete_fn = &usbs_devtab_callback;
    endpoint->complete_data = (void*) &wait;
    (*endpoint->start_tx_fn)(endpoint);

    // Sleep until the callback sets wait.completed. The DSR lock closes the
    // window between testing the flag and blocking, so a completion that
    // arrives in between cannot be missed.
    cyg_drv_mutex_lock(&wait.lock);
    cyg_drv_dsr_lock();
    while (!wait.completed) {
        cyg_drv_cond_wait(&wait.signal);
    }
    cyg_drv_dsr_unlock();
    cyg_drv_mutex_unlock(&wait.lock);

    // Negative result is an error code; non-negative is the byte count.
    if (wait.result < 0) {
        result = wait.result;
    } else {
        *size = wait.result;
    }
    cyg_drv_cond_destroy(&wait.signal);
    cyg_drv_mutex_destroy(&wait.lock);
    CYG_REPORT_RETURN();
    return result;
}
// --------------------------------------------------------------------------- // // Read Master Boot Record (partitions) // static Cyg_ErrNo read_mbr(disk_channel *chan) { cyg_disk_info_t *info = chan->info; disk_funs *funs = chan->funs; disk_controller *ctlr = chan->controller; cyg_uint8 *buf = (cyg_uint8*)malloc(info->block_size); Cyg_ErrNo res = ENOERR; int i; D(("read MBR\n")); for (i = 0; i < info->partitions_num; i++) info->partitions[i].type = 0x00; cyg_drv_mutex_lock( &ctlr->lock ); while( ctlr->busy ) cyg_drv_cond_wait( &ctlr->queue ); ctlr->busy = true; ctlr->result = -EWOULDBLOCK; for( i = 0; i < sizeof(buf); i++ ) buf[i] = 0; //diag_printf("buf = %p\n",buf); res = (funs->read)(chan, (void *)buf, 1, 0); if( res == -EWOULDBLOCK ) { // If the driver replys EWOULDBLOCK, then the transfer is // being handled asynchronously and when it is finished it // will call disk_transfer_done(). This will wake us up here // to continue. while( ctlr->result == -EWOULDBLOCK ) cyg_drv_cond_wait( &ctlr->async ); res = ctlr->result; } ctlr->busy = false; cyg_drv_mutex_unlock( &ctlr->lock ); if (ENOERR != res) return res; #ifdef DEBUG diag_dump_buf_with_offset( buf, 512, buf ); #endif //for test //buf[MBR_SIG_ADDR+0]=0x55;buf[MBR_SIG_ADDR+1]=0xAA; if (MBR_SIG_BYTE0 == buf[MBR_SIG_ADDR+0] && MBR_SIG_BYTE1 == buf[MBR_SIG_ADDR+1]) { int npart; D(("disk MBR found\n")); npart = info->partitions_num < MBR_PART_NUM ? 
info->partitions_num : MBR_PART_NUM; for (i = 0; i < MBR_PART_NUM; i++) { cyg_disk_partition_t *part = &info->partitions[i]; part->index = i+1; read_partition(&buf[MBR_PART_ADDR+MBR_PART_SIZE*i], info, part); //read_partition(&buftest[MBR_PART_SIZE*i], info, part); #ifdef DEBUG if (0x00 != part->type) { D(("\ndisk MBR partition %d:\n", i)); D((" type = %02X\n", part->type)); D((" state = %02X\n", part->state)); D((" start = %d\n", part->start)); D((" end = %d\n", part->end)); D((" size = %d\n\n", part->size)); } #endif #if EBR if(part->type == 0x05 || part->type == 0x0F) read_ebr(chan,part); #endif } } free(buf); return ENOERR; }
// Walk a chain of Extended Boot Records starting at 'part' (an extended
// partition from the MBR), appending one cyg_disk_partition_t per logical
// drive to the partition list via the pnext links. Logical partitions are
// numbered from index 5, per the conventional DOS scheme.
// Returns ENOERR, -ENOMEM on allocation failure, or the driver's error.
static Cyg_ErrNo read_ebr(disk_channel *chan, cyg_disk_partition_t *part)
{
    cyg_disk_info_t *info = chan->info;
    disk_funs       *funs = chan->funs;
    disk_controller *ctlr = chan->controller;
    cyg_uint8       *buf  = (cyg_uint8 *)malloc(info->block_size);
    Cyg_ErrNo        res  = ENOERR;
    cyg_uint32       start = 0;
    int              index = 5;       // logical partitions start at 5
    cyg_disk_partition_t *oldpart, *newpart = part;

    D(("read EBR\n"));

    if (NULL == buf)                  // BUGFIX: malloc() result was unchecked
        return -ENOMEM;

    while (1)
    {
        cyg_drv_mutex_lock( &ctlr->lock );
        while( ctlr->busy )
            cyg_drv_cond_wait( &ctlr->queue );
        ctlr->busy = true;
        ctlr->result = -EWOULDBLOCK;

        res = (funs->read)(chan, (void *)buf, 1, newpart->start);
        if( res == -EWOULDBLOCK )
        {
            // If the driver replies EWOULDBLOCK, then the transfer is being
            // handled asynchronously and when it is finished it will call
            // disk_transfer_done(). This will wake us up here to continue.
            while( ctlr->result == -EWOULDBLOCK )
                cyg_drv_cond_wait( &ctlr->async );
            res = ctlr->result;
        }

        ctlr->busy = false;
        cyg_drv_cond_signal( &ctlr->queue );  // wake any queued thread
        cyg_drv_mutex_unlock( &ctlr->lock );

        if (ENOERR != res)
        {
            free(buf);                // BUGFIX: buffer leaked on the error return
            return res;
        }

#ifdef DEBUG
        diag_dump_buf_with_offset( buf, 512, buf );
#endif

        if (MBR_SIG_BYTE0 == buf[MBR_SIG_ADDR+0] && MBR_SIG_BYTE1 == buf[MBR_SIG_ADDR+1])
        {
            D(("disk EBR found\n"));

            // EBR offsets are relative to this EBR's own sector; remember it
            // so the decoded start/end can be rebased to absolute LBA.
            start = newpart->start;
            read_partition(&buf[MBR_PART_ADDR+MBR_PART_SIZE*0], info, newpart);
            newpart->start += start;
            newpart->end += start;
#ifdef DEBUG
            if (0x00 != part->type)
            {
                D((" type  = %02X\n", newpart->type));
                D((" state = %02X\n", newpart->state));
                D((" start = %d\n", newpart->start));
                D((" end   = %d\n", newpart->end));
                D((" size  = %d\n\n", newpart->size));
            }
#endif
            // Second table entry non-empty => another EBR follows in the chain.
            if(0x00 != buf[MBR_PART_ADDR+MBR_PART_SIZE+4])
            {
                oldpart = newpart;
                newpart = (cyg_disk_partition_t *)malloc(sizeof(cyg_disk_partition_t));
                if (NULL == newpart)  // BUGFIX: malloc() result was unchecked
                {
                    free(buf);
                    return -ENOMEM;
                }
                memset(newpart, 0, sizeof(cyg_disk_partition_t));
                oldpart->pnext = newpart;
                newpart->index = index;
                index++;
                info->partitions_num++;
                newpart->pnext = NULL;
                read_partition(&buf[MBR_PART_ADDR+MBR_PART_SIZE], info, newpart);
                newpart->start += start;
                newpart->end += start;
            }
            else
                break;
        }
        else
            break;
    }
    free(buf);
    return res;
}
/**
 *
 * QSPI bus transfer with IRQ function.
 *
 * Programs the Zynq-7000 QSPI controller for a manual-start transfer,
 * pre-loads the TX FIFO where appropriate, triggers the transfer and then
 * sleeps until the ISR/DSR signals completion.
 *
 * @param dev     - QSPI device handle (its spi_bus is the controller).
 * @param count   - Number of bytes to transmit.
 * @param tx_data - Pointer to TX buffer (may be NULL).
 * @param rx_data - Pointer to RX buffer (may be NULL).
 *
 * @return none
 *
 *****************************************************************************/
static void qspi_xc7z_transfer(cyg_qspi_xc7z_device_t *dev, cyg_uint32 count, const cyg_uint8 *tx_data, cyg_uint8 *rx_data)
{
    entry_debug();

    cyg_qspi_xc7z_bus_t *qspi_bus = (cyg_qspi_xc7z_bus_t *)dev->qspi_device.spi_bus;
    cyg_uint32 val;

    // Enable device
    HAL_WRITE_UINT32(qspi_bus->base + XQSPIPS_ER_OFFSET, XQSPIPS_ER_ENABLE_MASK);

    // Enable manual start and forced slave-select
    HAL_READ_UINT32(qspi_bus->base + XQSPIPS_CR_OFFSET, val);
    val |= XQSPIPS_CR_MANSTRTEN_MASK | XQSPIPS_CR_SSFORCE_MASK;
    HAL_WRITE_UINT32(qspi_bus->base + XQSPIPS_CR_OFFSET, val);

    // Make the buffers coherent with main memory before the transfer.
    if (NULL != tx_data)
        HAL_DCACHE_STORE(tx_data, count);
    if (NULL != rx_data)
        HAL_DCACHE_FLUSH(rx_data, count);

    // Send first instruction if it has not been latched yet
    if (qspi_bus->uc_tx_instr == 0)
        qspi_xc7z_send_instruction(qspi_bus);

    // Pre-fill the TX FIFO unless the pending instruction is one of the
    // read opcodes.
    // BUGFIX: the opcode tests were joined with '||', which makes the
    // compound condition a tautology (an opcode always differs from at
    // least one of three distinct values), so the FIFO was filled even for
    // read commands. The tests must all hold at once: join with '&&'.
    if ((qspi_bus->us_tx_bytes) &&
        ((qspi_bus->uc_tx_instr != XQSPIPS_FLASH_OPCODE_FAST_READ) &&
         (qspi_bus->uc_tx_instr != XQSPIPS_FLASH_OPCODE_DUAL_READ) &&
         (qspi_bus->uc_tx_instr != XQSPIPS_FLASH_OPCODE_QUAD_READ)))
        qspi_xc7z_fill_tx_fifo(qspi_bus, count);

    // Enable the QSPI int events we are interested in
    HAL_WRITE_UINT32(qspi_bus->base + XQSPIPS_IER_OFFSET,
                     XQSPIPS_IXR_TXOW_MASK | XQSPIPS_IXR_MODF_MASK);

    // Issue the manual-start command
    HAL_READ_UINT32(qspi_bus->base + XQSPIPS_CR_OFFSET, val);
    val |= XQSPIPS_CR_MANSTRT_MASK;
    HAL_WRITE_UINT32(qspi_bus->base + XQSPIPS_CR_OFFSET, val);

    cyg_drv_mutex_lock(&qspi_bus->transfer_mx);
    {
        qspi_bus->transfer_end = false;

        // Unmask the SPI int
        cyg_drv_interrupt_unmask(qspi_bus->interrupt_number);

        // Wait for completion; the DSR lock closes the race between
        // testing transfer_end and blocking on the condition variable.
        cyg_drv_dsr_lock();
        {
            while (!qspi_bus->transfer_end)
                cyg_drv_cond_wait(&qspi_bus->transfer_cond);
        }
        cyg_drv_dsr_unlock();
    }
    cyg_drv_mutex_unlock(&qspi_bus->transfer_mx);
}
// Receive 'count' bytes from an I2C slave on the Zynq I2C controller.
// Optionally generates a START (programming address/direction/FIFO) and a
// STOP (releasing the bus HOLD). The actual draining of the RX FIFO is done
// by the ISR/DSR, which this function waits for on extra->i2c_wait.
// Returns the number of bytes actually received.
cyg_uint32 zynq_i2c_rx(const cyg_i2c_device* dev, cyg_bool send_start, cyg_uint8* rx_data, cyg_uint32 count, cyg_bool send_nack, cyg_bool send_stop)
{
    // get driver private data
    cyg_zynq_i2c_extra* extra = (cyg_zynq_i2c_extra*)dev->i2c_bus->i2c_extra;
    cyg_uint16 ctrl_reg;
    // NOTE: the unused locals 'bytes_to_send' and 'isr_status' were removed.

#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("RX start\n");
#endif
    // Hand the transfer description to the interrupt handlers.
    extra->i2c_addr = dev->i2c_address;
    extra->i2c_bytes_left = count;
    extra->i2c_rx_buf = rx_data;

    if(send_start)
    {
        HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
        // Clear all except div fields
        ctrl_reg &= 0xff00;
        // clear FIFO, master receive mode
        ctrl_reg |= ((1 << CR_RW) | (1 << CR_CLR_FIFO) |
                     (1 << CR_ACKEN) | (1 << CR_NEA) | (1 << CR_MS));
        // Hold the bus if the transfer cannot complete in one FIFO-full,
        // if no STOP is wanted, or if a transaction is already active.
        if(extra->i2c_bytes_left > XI2CPS_FIFO_DEPTH || !send_stop ||
           (extra->i2c_flag & I2C_FLAG_ACT))
        {
            ctrl_reg |= (1 << CR_HOLD);
            extra->i2c_hold_flag = 1;
        }
        // Write config bits
        HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
        // write number of data to receive from slave
        if(extra->i2c_bytes_left > XI2CPS_FIFO_DEPTH)
            HAL_WRITE_UINT8(XI2CPS_BASE + XI2CPS_XFER_SIZE_OFFSET, XI2CPS_FIFO_DEPTH + 1);
        else
            HAL_WRITE_UINT8(XI2CPS_BASE + XI2CPS_XFER_SIZE_OFFSET, extra->i2c_bytes_left);
        // Write Slave address - generate start condition, tx start
        HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_ADDR_OFFSET,
                         (dev->i2c_address & XI2CPS_ADDR_MASK));
    }
    else //transmission in progress
    {
        // write number of data to receive from slave
        if(extra->i2c_bytes_left > XI2CPS_FIFO_DEPTH)
            HAL_WRITE_UINT8(XI2CPS_BASE + XI2CPS_XFER_SIZE_OFFSET, XI2CPS_FIFO_DEPTH + 1);
        else
            HAL_WRITE_UINT8(XI2CPS_BASE + XI2CPS_XFER_SIZE_OFFSET, extra->i2c_bytes_left);
    }

#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("isr starting\n");
#endif
    cyg_drv_mutex_lock(&extra->i2c_lock);
    cyg_drv_dsr_lock();
    cyg_drv_interrupt_unmask(extra->i2c_isr_vector);
#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("waiting for data reception...\n");
#endif
    // Sleep until the DSR reports completion or an error.
    while(!(extra->i2c_flag & (I2C_FLAG_FINISH | I2C_FLAG_ERROR)))
    {
        cyg_drv_cond_wait(&extra->i2c_wait);
    }
#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("data received!?\n");
#endif
    cyg_drv_interrupt_mask(extra->i2c_isr_vector);
    cyg_drv_dsr_unlock();
    cyg_drv_mutex_unlock(&extra->i2c_lock);

    if(extra->i2c_flag & I2C_FLAG_ERROR)
    {
#ifdef ZYNQ_I2C_DEBUG
        DPRINTF("RX error extra->i2c_flag = ");
        diag_printf("%x\n", extra->i2c_flag);
#endif
        extra->i2c_flag = 0;
        //TODO: condition for hold_flag?
        HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
        ctrl_reg &= ~(1 << CR_HOLD);
        HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
    }
    else
    {
        if(send_stop)
        {
            //TODO: condition for hold_flag?
            HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
            ctrl_reg &= ~(1 << CR_HOLD);
            HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
            extra->i2c_flag = 0;
        }
        else
        {
            extra->i2c_flag = I2C_FLAG_ACT;
        }
    }

    // i2c_bytes_left holds the residue; report how many bytes arrived.
    count -= extra->i2c_bytes_left;
    extra->i2c_addr = 0;
    extra->i2c_bytes_left = 0;
    extra->i2c_rx_buf = NULL;
#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("rx finished\n");   // BUGFIX: missing ';' broke the debug build
#endif
    return count;
}
// Read up to *len bytes from a serial channel into _buf.
// If the channel has no input buffer (cbuf->len == 0) the read is polled
// directly from the hardware; otherwise bytes are drained from the
// interrupt-driven circular buffer, blocking (unless nonblocking mode)
// until the request is satisfied or the wait is aborted.
// On early exit *len is updated to the number of bytes actually read.
static Cyg_ErrNo serial_read(cyg_io_handle_t handle, void *_buf, cyg_uint32 *len)
{
    cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle;
    serial_channel *chan = (serial_channel *)t->priv;
    serial_funs *funs = chan->funs;
    cyg_uint8 *buf = (cyg_uint8 *)_buf;
    cyg_int32 size = 0;
    cbuf_t *cbuf = &chan->in_cbuf;
    Cyg_ErrNo res = ENOERR;
#ifdef XX_CYGDBG_DIAG_BUF
    extern int enable_diag_uart;
    int _enable = enable_diag_uart;
    int _time, _stime;
    externC cyg_tick_count_t cyg_current_time(void);
#endif // CYGDBG_DIAG_BUF

    cyg_drv_mutex_lock(&cbuf->lock);
    cbuf->abort = false;

    if (cbuf->len == 0)
    {
        // Non interrupt driven (i.e. polled) operation
        //
        // BUGFIX: the old loop counted with "while (size++ < *len)", so a
        // dropped XON/XOFF flow-control byte still consumed one slot of the
        // requested length: the caller got fewer data bytes than *len with
        // no indication. Only count bytes actually stored.
        while (size < *len)
        {
            cyg_uint8 c = (funs->getc)(chan);
#ifdef CYGOPT_IO_SERIAL_FLOW_CONTROL_SOFTWARE
            // for software flow control, if the driver returns one of the
            // characters we act on it and then drop it (the app must not
            // see it)
            if ( chan->config.flags & CYGNUM_SERIAL_FLOW_XONXOFF_TX )
            {
                if ( c == CYGDAT_IO_SERIAL_FLOW_CONTROL_XOFF_CHAR )
                {
                    throttle_tx( chan );
                    continue;           // not data: do not store, do not count
                }
                else if ( c == CYGDAT_IO_SERIAL_FLOW_CONTROL_XON_CHAR )
                {
                    restart_tx( chan );
                    continue;           // not data: do not store, do not count
                }
            }
#endif
            *buf++ = c;
            size++;
        }
    }
    else
    {
        cyg_drv_dsr_lock();  // Avoid races
        while (size < *len)
        {
            if (cbuf->nb > 0)
            {
#ifdef CYGPKG_IO_SERIAL_FLOW_CONTROL
                // Ease off the throttle once we drop below the low-water mark.
                if ( (cbuf->nb <= cbuf->low_water) &&
                     (chan->flow_desc.flags & CYG_SERIAL_FLOW_IN_THROTTLED) )
                    restart_rx( chan, false );
#endif
                *buf++ = cbuf->data[cbuf->get];
                if (++cbuf->get == cbuf->len) cbuf->get = 0;
                cbuf->nb--;
                size++;
            }
            else
            {
#ifdef CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING
                if (!cbuf->blocking)
                {
                    *len = size;        // characters actually read
                    res = -EAGAIN;
                    break;
                }
#endif // CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING
                cbuf->waiting = true;
#ifdef XX_CYGDBG_DIAG_BUF
                enable_diag_uart = 0;
                HAL_CLOCK_READ(&_time);
                _stime = (int)cyg_current_time();
                diag_printf("READ wait - get: %d, put: %d, time: %x.%x\n", cbuf->get, cbuf->put, _stime, _time);
                enable_diag_uart = _enable;
#endif // CYGDBG_DIAG_BUF
                // A false return means the wait was interrupted -> abort.
                if( !cyg_drv_cond_wait(&cbuf->wait) )
                    cbuf->abort = true;
#ifdef XX_CYGDBG_DIAG_BUF
                enable_diag_uart = 0;
                HAL_CLOCK_READ(&_time);
                _stime = (int)cyg_current_time();
                diag_printf("READ continue - get: %d, put: %d, time: %x.%x\n", cbuf->get, cbuf->put, _stime, _time);
                enable_diag_uart = _enable;
#endif // CYGDBG_DIAG_BUF
                if (cbuf->abort)
                {
                    // Give up!
                    *len = size;        // characters actually read
                    cbuf->abort = false;
                    cbuf->waiting = false;
                    res = -EINTR;
                    break;
                }
            }
        }
        cyg_drv_dsr_unlock();
    }
#ifdef XX_CYGDBG_DIAG_BUF
    cyg_drv_isr_lock();
    enable_diag_uart = 0;
    HAL_CLOCK_READ(&_time);
    _stime = (int)cyg_current_time();
    diag_printf("READ done - size: %d, len: %d, time: %x.%x\n", size, *len, _stime, _time);
    enable_diag_uart = _enable;
    cyg_drv_isr_unlock();
#endif // CYGDBG_DIAG_BUF
    cyg_drv_mutex_unlock(&cbuf->lock);
    return res;
}
//=========================================================================== // Set CAN channel configuration //=========================================================================== static Cyg_ErrNo can_set_config(cyg_io_handle_t handle, cyg_uint32 key, const void *xbuf, cyg_uint32 *len) { cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle; can_channel *chan = (can_channel *)t->priv; Cyg_ErrNo res = ENOERR; can_lowlevel_funs *funs = chan->funs; can_cbuf_t *out_cbuf = &chan->out_cbuf; can_cbuf_t *in_cbuf = &chan->in_cbuf; switch (key) { #ifdef CYGOPT_IO_CAN_SUPPORT_NONBLOCKING // // Set calls to read function to blocking / nonblocking mode // case CYG_IO_SET_CONFIG_READ_BLOCKING: { if (*len < sizeof(cyg_uint32) || 0 == in_cbuf->len) { return -EINVAL; } in_cbuf->blocking = (1 == *(cyg_uint32*)xbuf) ? true : false; } break; // // set calls to write functions to blocking / nonblocking mode // case CYG_IO_SET_CONFIG_WRITE_BLOCKING: { if (*len < sizeof(cyg_uint32) || 0 == out_cbuf->len) { return -EINVAL; } out_cbuf->blocking = (1 == *(cyg_uint32*)xbuf) ? 
true : false; } break; #endif // CYGOPT_IO_CAN_SUPPORT_NONBLOCKING #ifdef CYGOPT_IO_CAN_SUPPORT_TIMEOUTS // // return current timeouts // case CYG_IO_SET_CONFIG_CAN_TIMEOUT : { cyg_can_timeout_info_t *ptimeout_info; if (*len < sizeof(cyg_can_timeout_info_t)) { return -EINVAL; } *len = sizeof(cyg_can_timeout_info_t); ptimeout_info = (cyg_can_timeout_info_t *)xbuf; in_cbuf->timeout = ptimeout_info->rx_timeout; out_cbuf->timeout = ptimeout_info->tx_timeout; } break; // case CYG_IO_GET_CONFIG_CAN_TIMEOUT_INFO #endif // CYGOPT_IO_CAN_SUPPORT_TIMEOUTS case CYG_IO_SET_CONFIG_CAN_INPUT_FLUSH: { // // Flush any buffered input // if (in_cbuf->len == 0) { break; // Nothing to do if not buffered } cyg_drv_mutex_lock(&in_cbuf->lock); // Stop any further input processing cyg_drv_dsr_lock(); if (in_cbuf->waiting) { in_cbuf->abort = true; cyg_drv_cond_broadcast(&in_cbuf->wait); in_cbuf->waiting = false; } in_cbuf->get = in_cbuf->put = in_cbuf->data_cnt = 0; // Flush buffered input // // Pass to the hardware driver in case it wants to flush FIFOs etc. // (funs->set_config)(chan, CYG_IO_SET_CONFIG_CAN_INPUT_FLUSH, NULL, NULL); cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&in_cbuf->lock); } // CYG_IO_SET_CONFIG_CAN_INPUT_FLUSH: // // flush any buffered output // case CYG_IO_SET_CONFIG_CAN_OUTPUT_FLUSH: { // Throw away any pending output if (out_cbuf->len == 0) { break; // Nothing to do if not buffered } cyg_drv_mutex_lock(&out_cbuf->lock); // Stop any further output processing cyg_drv_dsr_lock(); if (out_cbuf->data_cnt > 0) { out_cbuf->get = out_cbuf->put = out_cbuf->data_cnt = 0; // Empties queue! (funs->stop_xmit)(chan); // Done with transmit } // // Pass to the hardware driver in case it wants to flush FIFOs etc. 
// (funs->set_config)(chan, CYG_IO_SET_CONFIG_CAN_OUTPUT_FLUSH, NULL, NULL); if (out_cbuf->waiting) { out_cbuf->abort = true; cyg_drv_cond_broadcast(&out_cbuf->wait); out_cbuf->waiting = false; }// if (out_cbuf->waiting) cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&out_cbuf->lock); } break; // CYG_IO_GET_CONFIG_CAN_OUTPUT_FLUSH: // // wait until all messages in outbut buffer are sent // case CYG_IO_GET_CONFIG_SERIAL_OUTPUT_DRAIN: { // Wait for any pending output to complete if (out_cbuf->len == 0) { break; // Nothing to do if not buffered } cyg_drv_mutex_lock(&out_cbuf->lock); // Stop any further output processing cyg_drv_dsr_lock(); while (out_cbuf->pending || (out_cbuf->data_cnt > 0)) { out_cbuf->waiting = true; if(!cyg_drv_cond_wait(&out_cbuf->wait)) { res = -EINTR; } } cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&out_cbuf->lock); } break;// CYG_IO_GET_CONFIG_SERIAL_OUTPUT_DRAIN: // // Abort any outstanding I/O, including blocked reads // Caution - assumed to be called from 'timeout' (i.e. DSR) code // case CYG_IO_SET_CONFIG_CAN_ABORT : { in_cbuf->abort = true; cyg_drv_cond_broadcast(&in_cbuf->wait); out_cbuf->abort = true; cyg_drv_cond_broadcast(&out_cbuf->wait); } break; #ifdef CYGOPT_IO_CAN_SUPPORT_CALLBACK // // Set callback configuration // To disable callback set flag_mask = 0 // case CYG_IO_SET_CONFIG_CAN_CALLBACK: { if (*len != sizeof(cyg_can_callback_cfg)) { return -EINVAL; } // Copy data under DSR locking cyg_drv_dsr_lock(); chan->callback_cfg = *((cyg_can_callback_cfg*) xbuf); cyg_drv_dsr_unlock(); } break; #endif //CYGOPT_IO_CAN_SUPPORT_CALLBACK default: // // pass down to lower layers // res = (funs->set_config)(chan, key, xbuf, len); } // switch (key) return res; }
//=========================================================================== // Read one single CAN event from hw //=========================================================================== static Cyg_ErrNo can_read(cyg_io_handle_t handle, void *_buf, cyg_uint32 *len) { cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle; can_channel *chan = (can_channel *)t->priv; can_cbuf_t *cbuf = &chan->in_cbuf; cyg_uint32 size = 0; Cyg_ErrNo res = ENOERR; // // the user need to provide a can event buffer // if (*len != sizeof(cyg_can_event)) { return -EINVAL; } cyg_drv_mutex_lock(&cbuf->lock); cbuf->abort = false; cyg_drv_dsr_lock(); // avoid race conditions while (size < *len) { // // if message buffer contains at least one message then read the // oldest message from buffer and return // if (cbuf->data_cnt > 0) { CYG_CAN_EVENT_T *prxbuf = (CYG_CAN_EVENT_T *)cbuf->pdata; CYG_CAN_EVENT_T *pbuf_event = &prxbuf[cbuf->get]; cyg_can_event *pevent = (cyg_can_event *)_buf; CYG_CAN_READ_EVENT(pevent, pbuf_event); // copy event cbuf->get = (cbuf->get + 1) % cbuf->len; cbuf->data_cnt--; size += sizeof(cyg_can_event); } else { // // if messaeg buffer does not contain any message, then wait until // a message arrives or return immediatelly if nonblocking calls are // supported // cbuf->waiting = true; #if defined(CYGOPT_IO_CAN_SUPPORT_NONBLOCKING) #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS) // // If timeouts are enabled and we use nonblocking calls then we // can use the timeout values // if (!cbuf->blocking) { if(!CYG_DRV_COND_WAIT(&cbuf->wait, cbuf->timeout)) { cbuf->abort = true; } } // if (!cbuf->blocking)# else #else // #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS) // // if this is a nonblocking call then we return immediatelly // if (!cbuf->blocking) { *len = 0; res = -EAGAIN; break; } else #endif // #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS) #endif // #if defined(CYGOPT_IO_CAN_SUPPORT_NONBLOCKING) { if(!cyg_drv_cond_wait(&cbuf->wait)) { cbuf->abort = true; } } if (cbuf->abort) 
{ *len = size; cbuf->abort = false; cbuf->waiting = false; res = -EINTR; break; } } } // while (size < *len) cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&cbuf->lock); return res; }
//=========================================================================== // Write exactly one CAN message to CAN bus //=========================================================================== static Cyg_ErrNo can_write(cyg_io_handle_t handle, const void *_buf, cyg_uint32 *len) { cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle; can_channel *chan = (can_channel *)t->priv; can_lowlevel_funs *funs = chan->funs; Cyg_ErrNo res = ENOERR; can_cbuf_t *cbuf = &chan->out_cbuf; cyg_uint32 size = *len; // // the user need to provide a can message buffer // if (*len != sizeof(cyg_can_message)) { return -EINVAL; } cyg_drv_mutex_lock(&cbuf->lock); cbuf->abort = false; cyg_drv_dsr_lock(); // avoid race condition while testing pointers while (size > 0) { if (cbuf->data_cnt == cbuf->len) { cbuf->waiting = true; // Buffer full - wait for space funs->start_xmit(chan); // Make sure xmit is running // // Check flag: 'start_xmit' may have obviated the need // to wait // if (cbuf->waiting) { cbuf->pending += size; // Have this much more to send [eventually] #if defined(CYGOPT_IO_CAN_SUPPORT_NONBLOCKING) #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS) // // If timeouts are enabled and we use nonblocking calls then we // can use the timeout values // if (!cbuf->blocking) { if(!CYG_DRV_COND_WAIT(&cbuf->wait, cbuf->timeout)) { cbuf->abort = true; } } // if (!cbuf->blocking)# else #else // #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS) // // if this is a nonblocking call then we return immediatelly // if (!cbuf->blocking) { *len = 0; res = -EAGAIN; break; } else #endif // #if defined(CYGOPT_IO_CAN_SUPPORT_TIMEOUTS) #endif //defined(CYGOPT_IO_CAN_SUPPORT_NONBLOCKING) { if(!cyg_drv_cond_wait(&cbuf->wait)) { cbuf->abort = true; } } cbuf->pending -= size; if (cbuf->abort) { // Give up! 
*len -= size; // number of characters actually sent cbuf->abort = false; cbuf->waiting = false; res = -EINTR; break; } // if (cbuf->abort) } // if (cbuf->waiting) } // if (cbuf->data_cnt == cbuf->len) else { // // there is enougth space left so we can store additional data // CYG_CAN_MSG_T *ptxbuf = (CYG_CAN_MSG_T *)cbuf->pdata; CYG_CAN_MSG_T *pbuf_message = &ptxbuf[cbuf->put]; cyg_can_message *pmessage = (cyg_can_message *)_buf; CYG_CAN_WRITE_MSG(pbuf_message, pmessage); // copy message cbuf->put = (cbuf->put + 1) % cbuf->len; cbuf->data_cnt++; size -= sizeof(cyg_can_message); } } // while (size > 0) (funs->start_xmit)(chan); // Start output as necessary cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&cbuf->lock); return res; }
// Perform one SPI transaction on a Freescale DSPI controller.
// Large transfers are driven by eDMA; transfers that fit in the TX FIFO are
// pushed directly. The loop iterates until count_down reaches zero, waiting
// for completion either by polling the EOQF flag or by sleeping on the
// bus's transfer_done condition (interrupt driven).
//
// device    - generic SPI device (actually cyg_spi_freescale_dspi_device_t)
// tick_only - send clock ticks only (CS handling depends on the
//             CYGOPT_DEVS_SPI_FREESCALE_DSPI_TICK_ONLY_DROPS_CS option)
// polled    - busy-wait instead of using interrupts
// count     - number of bytes (frames are 16-bit when bus_16bit is set)
// tx_data   - data to transmit, may be NULL (e.g. tick/receive only)
// rx_data   - receive buffer, may be NULL (transmit only)
// drop_cs   - deassert chip select at the end of the transaction
static void spi_transaction_do (cyg_spi_device* device, cyg_bool tick_only,
                                cyg_bool polled, cyg_uint32 count,
                                const cyg_uint8* tx_data, cyg_uint8* rx_data,
                                cyg_bool drop_cs)
{
    cyg_spi_freescale_dspi_bus_t* dspi_bus =
          (cyg_spi_freescale_dspi_bus_t*) device->spi_bus;
    cyg_spi_freescale_dspi_device_t* dspi_device =
          (cyg_spi_freescale_dspi_device_t*) device;
    cyg_bool bus_16bit = dspi_device->clocking.bus_16bit;
    cyghwr_devs_freescale_dspi_t* dspi_p = dspi_bus->setup_p->dspi_p;
    cyghwr_hal_freescale_dma_set_t* dma_set_p;
    cyghwr_hal_freescale_edma_t* edma_p = NULL;
    cyg_uint32 count_down;
    cyg_uint32 txfifo_n = dspi_bus->txfifo_n;
    cyg_uint32 pushr;
    cyg_uint32 pushque_n;
    cyg_uint32 dma_chan_rx_i = 0;
    cyg_uint32 dma_chan_tx_i = 0;
    cyg_uint8* rx_data0;   // non-NULL only when DMA is used; remembered for
                           // the final cache invalidate over the Rx buffer
#if DEBUG_SPI >= 2
    cyg_uint32 first_turn = 1;
#endif
    DEBUG2_PRINTF("DSPI: transaction: count=%d drop_cs=%d tick_only=%d\n",
                  count, drop_cs, tick_only);
    // Set up peripheral CS field. DSPI automatically asserts and deasserts CS
    pushr =
#ifndef CYGOPT_DEVS_SPI_FREESCALE_DSPI_TICK_ONLY_DROPS_CS
        // Compatibility option
        // eCos Reference Manual states that CS should drop prior to sending
        // ticks, but other SPI drivers do not touch the CS.
        tick_only ? dspi_p->pushr & 0x87FF0000 :
#endif
        dspi_chip_select_set(
#ifdef CYGOPT_DEVS_SPI_FREESCALE_DSPI_TICK_ONLY_DROPS_CS
            // Compatibility option. See comment above.
            tick_only ? -1 :
#endif
            dspi_device->dev_num,
            dspi_p->mcr & FREESCALE_DSPI_MCR_PCSSE_M, true);
    pushr |= FREESCALE_DSPI_PUSHR_CONT_M;

    dspi_fifo_clear(dspi_p);
    pushque_n = dspi_bus->pushque_n;
    if(bus_16bit)
        txfifo_n *= 2;    // FIFO capacity in bytes when frames are 16-bit

    dma_set_p = dspi_bus->setup_p->dma_set_p;
    if((count > txfifo_n) && dma_set_p) {
        // Transfer does not fit in the FIFO: use eDMA for Rx (and later Tx).
        rx_data0 = rx_data;
        edma_p = dma_set_p->edma_p;
        // Set up the DMA channels.
        dma_chan_rx_i = SPI_DMA_CHAN_I(dma_set_p, RX);
        dma_chan_tx_i = SPI_DMA_CHAN_I(dma_set_p, TX);
        rx_dma_channel_setup(dma_set_p, (cyg_uint8*) rx_data, bus_16bit,
                             &edma_p->tcd[dma_chan_rx_i]);
        hal_freescale_edma_erq_enable(edma_p, dma_chan_rx_i);
        // Route TFFF/RFDF requests to the DMA controller.
        dspi_irq_enable(dspi_p, FREESCALE_DSPI_RSER_TFFF_RE_M |
                        FREESCALE_DSPI_RSER_RFDF_RE_M |
                        FREESCALE_DSPI_RSER_TFFF_DIRS_M |
                        FREESCALE_DSPI_RSER_RFDF_DIRS_M);
    } else {
        rx_data0 = NULL;
        // If byte count fits in the FIFO don't bother with DMA.
        if(dma_set_p) {
            edma_p = dma_set_p->edma_p;
            hal_freescale_edma_erq_disable(edma_p, SPI_DMA_CHAN_I(dma_set_p, RX));
        }
        dma_set_p = NULL;
        dspi_irq_disable(dspi_p, FREESCALE_DSPI_RSER_TFFF_RE_M |
                         FREESCALE_DSPI_RSER_RFDF_RE_M |
                         FREESCALE_DSPI_RSER_TFFF_DIRS_M |
                         FREESCALE_DSPI_RSER_RFDF_DIRS_M);
    }
    if(!polled)
        cyg_drv_interrupt_unmask(dspi_bus->setup_p->intr_num);

    count_down = count;
    while(count_down) {
#if DEBUG_SPI >= 2
        if(first_turn) {
            if(dspi_bus->pushque_p)
                dspi_bus->pushque_p[0] |= FREESCALE_DSPI_PUSHR_CTCNT_M;
            first_turn = 0;
        }
#endif
        if(dma_set_p && (count_down > txfifo_n)) {
            // Transfer size is larger than DSPI FIFO
            //   Use DMA Tx
            count_down = tx_dma_channel_setup(dspi_bus, (cyg_uint8*) tx_data,
                                              count_down, bus_16bit,
                                              pushr, drop_cs);
#if DEBUG_SPI >= 3
            hal_freescale_edma_transfer_diag(edma_p, dma_chan_rx_i, true);
#endif
            // Enable the Tx DMA / SPI controller.
            hal_freescale_edma_erq_enable(edma_p, dma_chan_tx_i);
            DSPI_EOQ_CLEAR(dspi_p);
        } else {
            // Transfer size fits within DSPI FIFO
            //   No need for DMA Tx
            DSPI_EOQ_CLEAR(dspi_p);
            count_down = fifo_pushque_fill(dspi_bus, (cyg_uint8*) tx_data,
                                           count_down, bus_16bit,
                                           pushr, drop_cs);
#if DEBUG_SPI >= 3
            cyghwr_devs_freescale_dspi_diag(dspi_bus);
#endif
        }

        if(polled) {
            DEBUG2_PRINTF("DSPI Polled:\n");
            // Busy-wait for DSPI/DMA (polling for completion).
            while(!(dspi_p->sr & FREESCALE_DSPI_SR_EOQF_M));
            if(dma_set_p) {
                // Disable the Tx DMA channel on completion.
                hal_freescale_edma_erq_disable(edma_p, dma_chan_tx_i);
            }
        } else {
            // Wait for DSPI/DMA completion. (interrupt driven).
            cyg_drv_mutex_lock(&dspi_bus->transfer_mutex);
            cyg_drv_dsr_lock();

            DSPI_IRQ_ENABLE(dspi_p);
            DEBUG2_PRINTF("DSPI IRQ: Enabled\n");
            // Sit back and wait for the ISR/DSRs to signal completion.
            cyg_drv_cond_wait (&dspi_bus->transfer_done);

            cyg_drv_dsr_unlock();
            cyg_drv_mutex_unlock(&dspi_bus->transfer_mutex);
        }

        if(dma_set_p) {
            // Make sure that Rx has been drained by DMA.
            while((dspi_p->sr & FREESCALE_DSPI_SR_RFDF_M));
            DEBUG2_PRINTF("Fifo Drained by DMA 0x%08x\n", dspi_p->sr);
            // Final (partial) chunk will be handled without DMA below.
            if(count_down <= txfifo_n && count_down > 0) {
                hal_freescale_edma_erq_disable(edma_p, dma_chan_rx_i);
                dma_set_p = NULL;
            }
        } else {
            // No DMA - "manually" drain Rx FIFO
            DEBUG2_PRINTF("DSPI FIFO: 'Manually' drain Rx fifo rx_data=%p"
                          " bus_16bit=%d\n", rx_data, bus_16bit);
#if DEBUG_SPI >= 3
            cyghwr_devs_freescale_dspi_diag(dspi_bus);
#endif
            if(rx_data) {
                if(bus_16bit) {
                    cyg_uint16* rx_data16 = (cyg_uint16*) rx_data;
                    while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M) {
                        DEBUG2_PRINTF(" Fifo Pull16 at %p\n", rx_data16);
                        *rx_data16++ = dspi_p->popr;
                    }
                    rx_data = (cyg_uint8*) rx_data16;
                } else {
                    while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M) {
                        DEBUG2_PRINTF(" Fifo Pull at %p\n", rx_data);
                        *rx_data++ = dspi_p->popr;
                    }
                }
            }
            dspi_fifo_drain(dspi_p);
        }
        dspi_fifo_clear(dspi_p);
        // Prepare for next iteration
        if(tx_data) {
            tx_data += pushque_n;
            if(bus_16bit)
                tx_data += pushque_n;   // pushque_n counts frames, not bytes
        }
    }
    if(rx_data0) {
        // Rx buffer may be out of sync with cache: invalidate so the CPU
        // re-reads what the DMA engine wrote to memory.
        DEBUG2_PRINTF("DSPI DMA: Flush cache %p len=%d\n", rx_data0, count);
        HAL_DCACHE_INVALIDATE(rx_data0, count);
        DEBUG2_PRINTF("DSPI DMA: Cache flushed\n");
    }
    if(!polled)
        cyg_drv_interrupt_mask(dspi_bus->setup_p->intr_num);
    dspi_device->chip_sel = !drop_cs;
    DEBUG2_PRINTF("cyg_transaction_do() chip_sel = %d drop_cs = %d\n",
                  dspi_device->chip_sel, drop_cs);
}
// Write *len blocks from buf to the disk starting at block 'pos'
// (partition-relative when the channel maps a partition). Transfers are
// chunked to info->ident.max_transfer blocks; asynchronous drivers are
// awaited via the controller's async condition. On return *len holds the
// number of blocks actually written.
static Cyg_ErrNo disk_bwrite(cyg_io_handle_t handle,
                             const void *buf,
                             cyg_uint32 *len,   // In blocks
                             cyg_uint32 pos)    // In blocks
{
    cyg_devtab_entry_t *t = (cyg_devtab_entry_t *) handle;
    disk_channel *chan = (disk_channel *) t->priv;
    disk_controller *ctlr = chan->controller;
    disk_funs *funs = chan->funs;
    cyg_disk_info_t *info = chan->info;
    cyg_uint32 size = *len;
    cyg_uint8 *bbuf = (cyg_uint8 *) buf;   // const cast away: driver API takes void*
    Cyg_ErrNo res = ENOERR;
    cyg_uint32 last;

    cyg_drv_mutex_lock( &ctlr->lock );

    while( ctlr->busy )
        cyg_drv_cond_wait( &ctlr->queue );

    if (info->connected && chan->valid)
    {
        ctlr->busy = true;

        // Rebase onto the partition and establish the last valid block.
        if (NULL != chan->partition)
        {
            pos += chan->partition->start;
            last = chan->partition->end;
        }
        else
        {
            last = info->blocks_num-1;
        }

        D(("disk write block=%d len=%d buf=%p\n", pos, *len, buf));

        while( size > 0 )
        {
            cyg_uint32 tfr = size;

            if (pos > last)
            {
                res = -EIO;
                break;
            }

            if( tfr > info->ident.max_transfer )
                tfr = info->ident.max_transfer;

            ctlr->result = -EWOULDBLOCK;

            cyg_drv_dsr_lock();

            res = (funs->write)(chan, (void*)bbuf, tfr, pos);

            if( res == -EWOULDBLOCK )
            {
                // If the driver replies EWOULDBLOCK, then the transfer is
                // being handled asynchronously and when it is finished it
                // will call disk_transfer_done(). This will wake us up here
                // to continue.
                while( ctlr->result == -EWOULDBLOCK )
                    cyg_drv_cond_wait( &ctlr->async );
                res = ctlr->result;
            }

            cyg_drv_dsr_unlock();

            if (ENOERR != res)
                break;

            if (!info->connected)
            {
                res = -EINVAL;
                break;
            }

            bbuf += tfr * info->block_size;
            pos  += tfr;
            size -= tfr;
        }

        // BUGFIX: the error paths used to jump straight to the unlock with
        // ctlr->busy still true, so every later caller blocked forever in
        // the queue wait above. Release the controller on all paths.
        ctlr->busy = false;
        cyg_drv_cond_signal( &ctlr->queue );
    }
    else
        res = -EINVAL;

    cyg_drv_mutex_unlock( &ctlr->lock );
#ifdef CYGPKG_KERNEL
    cyg_thread_yield();
#endif

    *len -= size;     // report blocks actually written
    return res;
}
// Run one SPI transfer on the AT91 controller using the PDC (peripheral
// DMA). tx_data must be valid; rx_data may be NULL for transmit-only, in
// which case only ENDTX is awaited and the RX data register is flushed at
// the end of each chunk.
static void spi_at91_transfer(cyg_spi_at91_device_t *dev, cyg_uint32 count, const cyg_uint8 *tx_data, cyg_uint8 *rx_data)
{
    cyg_spi_at91_bus_t *spi_bus = (cyg_spi_at91_bus_t *)dev->spi_device.spi_bus;

    // Since PDC transfer buffer counters are 16 bit long,
    // we have to split longer transfers into chunks.
    while (count > 0)
    {
        cyg_uint16 tr_count = count > 0xFFFF ? 0xFFFF : count;

        // Set rx buf pointer and counter
        if (NULL != rx_data)
        {
            HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_RPR, (cyg_uint32)rx_data);
            HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_RCR, (cyg_uint32)tr_count);
        }

        // Set tx buf pointer and counter (this starts the PDC transmit)
        HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_TPR, (cyg_uint32)tx_data);
        HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_TCR, (cyg_uint32)tr_count);

        // Enable the SPI int events we are interested in
        HAL_WRITE_UINT32(AT91_SPI+AT91_SPI_IER, AT91_SPI_SR_ENDRX | AT91_SPI_SR_ENDTX);

        cyg_drv_mutex_lock(&spi_bus->transfer_mx);
        {
            spi_bus->transfer_end = false;

            // Unmask the SPI int
            cyg_drv_interrupt_unmask(CYGNUM_HAL_INTERRUPT_SPI);

            // Wait for its completion; the DSR lock closes the race between
            // testing transfer_end and blocking on the condition variable.
            cyg_drv_dsr_lock();
            {
                while (!spi_bus->transfer_end)
                    cyg_drv_cond_wait(&spi_bus->transfer_cond);
            }
            cyg_drv_dsr_unlock();
        }
        cyg_drv_mutex_unlock(&spi_bus->transfer_mx);

        if (NULL == rx_data)
        {
            cyg_uint32 val;

            // If rx buffer was NULL, then the PDC receiver data transfer
            // was not started and we didn't wait for ENDRX, but only for
            // ENDTX. Meaning that right now the last byte is being serialized
            // over the line and when finished input data will appear in
            // rx data reg. We have to wait for this to happen here, if we
            // don't we'll get the last received byte as the first one in the
            // next transfer!
            // FIXME: is there any better way to do this?
            //        If not, then precalculate this value.
            val = 8000000/dev->cl_brate;
            CYGACC_CALL_IF_DELAY_US(val > 1 ? val : 1);

            // Clear the rx data reg
            HAL_READ_UINT32(AT91_SPI+AT91_SPI_RDR, val);
        }

        // Adjust running variables
        if (NULL != rx_data)
            rx_data += tr_count;
        tx_data += tr_count;
        count   -= tr_count;
    }
}
// Core DSPI transaction engine: shift 'count' frames out of 'tx_data'
// (and into 'rx_data' when non-NULL) on a Freescale DSPI bus.
//   tick_only - clock the bus without asserting any peripheral CS
//   polled    - busy-wait for completion instead of sleeping on the
//               'transfer_done' condition variable
//   drop_cs   - deassert chip select when the transaction ends
// Large transfers are chunked: Tx uses eDMA when the remaining count
// exceeds the FIFO depth, otherwise the Tx FIFO push queue is filled
// directly. Rx is always drained either by DMA or by hand.
static void
spi_transaction_do (cyg_spi_device* device, cyg_bool tick_only,
                    cyg_bool polled, cyg_uint32 count,
                    const cyg_uint8* tx_data, cyg_uint8* rx_data,
                    cyg_bool drop_cs)
{
    cyg_spi_freescale_dspi_bus_t* dspi_bus =
          (cyg_spi_freescale_dspi_bus_t*) device->spi_bus;
    cyg_spi_freescale_dspi_device_t* dspi_device =
          (cyg_spi_freescale_dspi_device_t*) device;

    cyg_bool bus_16bit = dspi_device->clocking.bus_16bit;

    cyghwr_devs_freescale_dspi_t* dspi_p = dspi_bus->setup_p->dspi_p;
    cyghwr_hal_freescale_dma_set_t* dma_set_p;
    cyghwr_hal_freescale_edma_t* edma_p = NULL;
    cyg_uint32 count_down;             // frames still to transfer
    cyg_uint32 txfifo_n = dspi_bus->txfifo_n;
    cyg_uint32 pushr;                  // PUSHR command bits reused each frame
    cyg_uint32 pushque_n;
    cyg_uint32 dma_chan_rx_i = 0;
    cyg_uint32 dma_chan_tx_i = 0;
#if DEBUG_SPI >= 2
    cyg_uint32 first_turn = 1;
#endif

    DEBUG2_PRINTF("DSPI: transaction: count=%d drop_cs=%d\n", count, drop_cs);

    // Set up peripheral CS field. DSPI automatically asserts and deasserts CS
    pushr = dspi_chip_select_set(tick_only ? -1 : dspi_device->dev_num,
                                 dspi_p->mcr & FREESCALE_DSPI_MCR_PCSSE_M,
                                 true);
    pushr |= FREESCALE_DSPI_PUSHR_CONT_M;

    // Start from empty FIFOs.
    dspi_fifo_clear(dspi_p);
    dspi_fifo_drain(dspi_p);

    pushque_n = dspi_bus->pushque_n;
    // In 16-bit mode each FIFO entry carries two bytes.
    if(bus_16bit)
        txfifo_n *= 2;

    if((dma_set_p=dspi_bus->setup_p->dma_set_p)) {
        edma_p = dma_set_p->edma_p;

        // Set up the DMA channels.
        dma_chan_rx_i = SPI_DMA_CHAN_I(dma_set_p, RX);
        dma_chan_tx_i = SPI_DMA_CHAN_I(dma_set_p, TX);

        // Rx DMA is armed up front so every received frame is drained
        // regardless of how Tx is fed.
        rx_dma_channel_setup(dma_set_p, (cyg_uint8*) rx_data, bus_16bit,
                             &edma_p->tcd[dma_chan_rx_i]);
        hal_freescale_edma_erq_enable(edma_p, dma_chan_rx_i);
    }

    if(!polled)
        cyg_drv_interrupt_unmask(dspi_bus->setup_p->intr_num);

    count_down = count;
    while(count_down) {
#if DEBUG_SPI >= 2
        if(first_turn) {
            // Clear the transfer counter on the very first frame (debug aid).
            if(dspi_bus->pushque_p)
                dspi_bus->pushque_p[0] |= FREESCALE_DSPI_PUSHR_CTCNT_M;
            first_turn = 0;
        }
#endif
        if(dma_set_p && (count_down > txfifo_n)) {
            // Transfer size is larger than DSPI FIFO
            // Use DMA Tx
            count_down = tx_dma_channel_setup(dspi_bus, (cyg_uint8*) tx_data,
                                              count_down, bus_16bit,
                                              pushr, drop_cs);
#if DEBUG_SPI >= 3
            hal_freescale_edma_transfer_diag(edma_p, dma_chan_rx_i, true);
#endif
            // Enable the Tx DMA / SPI controller.
            hal_freescale_edma_erq_enable(edma_p, dma_chan_tx_i);
            DSPI_EOQ_CLEAR(dspi_p);
        } else {
            // Transfer size fits within DSPI FIFO
            // No need for DMA Tx
            DSPI_EOQ_CLEAR(dspi_p);
            count_down = fifo_pushque_fill(dspi_bus, (cyg_uint8*) tx_data,
                                           count_down, bus_16bit,
                                           pushr, drop_cs);
#if DEBUG_SPI >= 3
            cyghwr_devs_freescale_dspi_diag(dspi_bus);
#endif
        }

        if(polled) {
            DEBUG2_PRINTF("DSPI Polled:\n");
            // Busy-wait for DSPI/DMA (polling for completion).
            while(!(dspi_p->sr & FREESCALE_DSPI_SR_EOQF_M));
            if(dma_set_p)
                // Disable the Tx DMA channel on completion.
                hal_freescale_edma_erq_disable(edma_p, dma_chan_tx_i);
        } else {
            // Wait for DSPI/DMA completion. (interrupt driven).
            cyg_drv_mutex_lock(&dspi_bus->transfer_mutex);
            cyg_drv_dsr_lock();

            DSPI_IRQ_ENABLE(dspi_p);
            DEBUG2_PRINTF("DSPI IRQ: Enabled\n");

            // Sit back and wait for the ISR/DSRs to signal completion.
            cyg_drv_cond_wait (&dspi_bus->transfer_done);

            cyg_drv_dsr_unlock();
            cyg_drv_mutex_unlock(&dspi_bus->transfer_mutex);
        }

        if(dma_set_p) {
            // Make sure that Rx has been drained by DMA.
            // (Busy-wait until the Rx FIFO drain flag clears.)
            if(rx_data)
                while((dspi_p->sr & FREESCALE_DSPI_SR_RFDF_M));
        } else {
            // No DMA - "manually" drain Rx FIFO
            DEBUG2_PRINTF("DSPI FIFO: 'Manually' drain Rx fifo\n");
#if DEBUG_SPI >= 3
            cyghwr_devs_freescale_dspi_diag(dspi_bus);
#endif
            if(rx_data) {
                if(bus_16bit) {
                    // Pop 16-bit frames while the Rx counter is non-zero.
                    cyg_uint16* rx_data16 = (cyg_uint16*) rx_data;
                    while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M)
                        *rx_data16++ = dspi_p->popr;
                    rx_data = (cyg_uint8*) rx_data16;
                } else {
                    while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M)
                        *rx_data++ = dspi_p->popr;
                }
            } else {
                // Caller does not want the data: throw it away.
                dspi_fifo_drain(dspi_p);
            }
        }
        dspi_fifo_clear(dspi_p);

        // Prepare for next iteration: advance Tx past the frames just
        // queued (twice the queue length in bytes for 16-bit mode).
        if(tx_data) {
            tx_data += pushque_n;
            if(bus_16bit)
                tx_data += pushque_n;
        }
    }

    if(dma_set_p && rx_data) {
        // Rx buffer may be out of sync with cache.
        DEBUG2_PRINTF("DSPI DMA: Invalidate cache\n");
        HAL_DCACHE_INVALIDATE(rx_data, count);
        DEBUG2_PRINTF("DSPI DMA: Cache invalidated\n");
    }

    if(!polled)
        cyg_drv_interrupt_mask(dspi_bus->setup_p->intr_num);

    // Remember whether CS is still asserted for the next transaction.
    dspi_device->chip_sel = !drop_cs;
}
// Write '*len' bytes from '_buf' to the serial channel. With no output
// buffer configured (out_cbuf.len == 0) the data is sent by polling the
// driver's putc; otherwise bytes are queued in the circular buffer and
// the transmitter is (re)started, blocking when the buffer fills unless
// non-blocking mode is configured. On return '*len' holds the number of
// bytes actually accepted. Returns ENOERR, -EAGAIN (non-blocking, full)
// or -EINTR (wait aborted).
static Cyg_ErrNo
serial_write(cyg_io_handle_t handle, const void *_buf, cyg_uint32 *len)
{
    cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle;
    serial_channel *chan = (serial_channel *)t->priv;
    serial_funs *funs = chan->funs;
    cyg_int32 size = *len;
    cyg_uint8 *buf = (cyg_uint8 *)_buf;
    int next;
    cbuf_t *cbuf = &chan->out_cbuf;
    Cyg_ErrNo res = ENOERR;

    cyg_drv_mutex_lock(&cbuf->lock);
    cbuf->abort = false;

    if (cbuf->len == 0) {
        // Non interrupt driven (i.e. polled) operation
        while (size-- > 0) {
#ifdef CYGPKG_IO_SERIAL_FLOW_CONTROL
            // Spin until the driver accepts the byte, but give up
            // immediately if output flow control throttles us.
            while ( ( 0 == (chan->flow_desc.flags & CYG_SERIAL_FLOW_OUT_THROTTLED) ) &&
                    ((funs->putc)(chan, *buf) == false) )
                ;  // Ignore full, keep trying
#else
            while ((funs->putc)(chan, *buf) == false)
                ;  // Ignore full, keep trying
#endif
            buf++;
        }
    } else {
        cyg_drv_dsr_lock();  // Avoid race condition testing pointers
        while (size > 0) {
            next = cbuf->put + 1;
            if (next == cbuf->len) next = 0;
            if (cbuf->nb == cbuf->len) {
                // Buffer full - wait for space
                cbuf->waiting = true;
#ifdef CYGPKG_IO_SERIAL_FLOW_CONTROL
                if ( 0 == (chan->flow_desc.flags & CYG_SERIAL_FLOW_OUT_THROTTLED) )
#endif
                    (funs->start_xmit)(chan);  // Make sure xmit is running
                // Check flag: 'start_xmit' may have obviated the need
                // to wait :-)
                if (cbuf->waiting) {
#ifdef CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING
                    // Optionally return if configured for non-blocking mode.
                    if (!cbuf->blocking) {
                        *len -= size;   // number of characters actually sent
                        cbuf->waiting = false;
                        res = -EAGAIN;
                        break;
                    }
#endif // CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING
                    cbuf->pending += size;  // Have this much more to send [eventually]
                    if( !cyg_drv_cond_wait(&cbuf->wait) )
                        cbuf->abort = true;
                    cbuf->pending -= size;
                }
                if (cbuf->abort) {
                    // Give up!
                    *len -= size;   // number of characters actually sent
                    cbuf->abort = false;
                    cbuf->waiting = false;
                    res = -EINTR;
                    break;
                }
            } else {
                // FIX: was 'cbuf->data[cbuf->put++] = *buf++;' — the
                // post-increment was dead code because 'put' is
                // immediately overwritten with 'next' on the following
                // line. Behavior is unchanged; the redundant increment
                // merely obscured the wrap-around handled by 'next'.
                cbuf->data[cbuf->put] = *buf++;
                cbuf->put = next;
                cbuf->nb++;
                size--;   // Only count if actually sent!
            }
        }
#ifdef CYGPKG_IO_SERIAL_FLOW_CONTROL
        if ( 0 == (chan->flow_desc.flags & CYG_SERIAL_FLOW_OUT_THROTTLED) )
#endif
            (funs->start_xmit)(chan);  // Start output as necessary
        cyg_drv_dsr_unlock();
    }
    cyg_drv_mutex_unlock(&cbuf->lock);
    return res;
}
//========================================================================== // receive into a buffer from a device //========================================================================== cyg_uint32 cyg_lpc2xxx_i2c_rx(const cyg_i2c_device *dev, cyg_bool send_start, cyg_uint8 *rx_data, cyg_uint32 count, cyg_bool send_nak, cyg_bool send_stop) { cyg_lpc2xxx_i2c_extra* extra = (cyg_lpc2xxx_i2c_extra*)dev->i2c_bus->i2c_extra; extra->i2c_addr = (dev->i2c_address << 1) | 0x01; extra->i2c_count = count; extra->i2c_rxbuf = rx_data; extra->i2c_rxnak = send_nak; // // for a repeated start the SI bit has to be reset // if we continue a previous transfer, start reception // if(send_start) { SET_CON(extra, CON_STA); if (I2C_FLAG_ACT == extra->i2c_flag) { CLR_CON(extra, CON_SI); } } extra->i2c_flag = 0; // // the isr will do most of the work, and the dsr will signal when an // error occurred or the transfer finished // cyg_drv_mutex_lock(&extra->i2c_lock); cyg_drv_dsr_lock(); cyg_drv_interrupt_unmask(I2C_ISRVEC(extra)); while(!(extra->i2c_flag & (I2C_FLAG_FINISH | I2C_FLAG_ERROR))) { cyg_drv_cond_wait(&extra->i2c_wait); } cyg_drv_interrupt_mask(I2C_ISRVEC(extra)); cyg_drv_dsr_unlock(); cyg_drv_mutex_unlock(&extra->i2c_lock); // too bad we have no way to tell the caller if (extra->i2c_flag & I2C_FLAG_ERROR) { diag_printf("I2C RX error flag: %x\n", extra->i2c_flag); extra->i2c_flag = 0; } else { if(send_stop) { SET_CON(extra, CON_STO); CLR_CON(extra, CON_SI | CON_STA); extra->i2c_flag = 0; } else { extra->i2c_flag = I2C_FLAG_ACT; } } count -= extra->i2c_count; extra->i2c_addr = 0; extra->i2c_count = 0; extra->i2c_rxbuf = NULL; return count; }
// Query or manipulate serial channel state. Besides pure "get" keys
// (info, buffer statistics, blocking mode) this entry point also
// implements drain/flush/abort operations, following the historical
// eCos convention of routing them through get_config. Returns ENOERR,
// -EINVAL for bad keys/undersized buffers, or -EINTR if a drain wait
// was interrupted.
static Cyg_ErrNo
serial_get_config(cyg_io_handle_t handle, cyg_uint32 key, void *xbuf,
                  cyg_uint32 *len)
{
    cyg_devtab_entry_t *t = (cyg_devtab_entry_t *)handle;
    serial_channel *chan = (serial_channel *)t->priv;
    cyg_serial_info_t *buf = (cyg_serial_info_t *)xbuf;
    Cyg_ErrNo res = ENOERR;
    cbuf_t *out_cbuf = &chan->out_cbuf;
    cbuf_t *in_cbuf = &chan->in_cbuf;
    serial_funs *funs = chan->funs;

    switch (key) {
    case CYG_IO_GET_CONFIG_SERIAL_INFO:
        // Copy out the current line configuration.
        if (*len < sizeof(cyg_serial_info_t)) {
            return -EINVAL;
        }
        *buf = chan->config;
        *len = sizeof(chan->config);
        break;

    case CYG_IO_GET_CONFIG_SERIAL_BUFFER_INFO:
        // return rx/tx buffer sizes and counts
      {
          cyg_serial_buf_info_t *p;
          if (*len < sizeof(cyg_serial_buf_info_t))
              return -EINVAL;
          *len = sizeof(cyg_serial_buf_info_t);
          p = (cyg_serial_buf_info_t *)xbuf;
          p->rx_bufsize = in_cbuf->len;
          if (p->rx_bufsize)
              p->rx_count = in_cbuf->nb;
          else
              p->rx_count = 0;
          p->tx_bufsize = out_cbuf->len;
          if (p->tx_bufsize)
              p->tx_count = out_cbuf->nb;
          else
              p->tx_count = 0;
      }
      break;

    case CYG_IO_GET_CONFIG_SERIAL_OUTPUT_DRAIN:
        // Wait for any pending output to complete
        if (out_cbuf->len == 0) break; // Nothing to do if not buffered
        cyg_drv_mutex_lock(&out_cbuf->lock);  // Stop any further output processing
        // NOTE(review, from a previous maintainer): taking the DSR lock
        // here prevents the transmit DSR from running, so serial_xmt_char
        // cannot drain the buffer while we hold it — the drain relies on
        // cyg_drv_cond_wait releasing the scheduler lock. There is also a
        // suspected race with serial_xmt_char's cond_broadcast: if the
        // driver's putc (e.g. w90n740) always returns true, this loop may
        // never be entered/exited as expected. TODO: confirm on affected
        // hardware before restructuring.
        cyg_drv_dsr_lock();
        while (out_cbuf->pending || (out_cbuf->nb > 0)) {
            out_cbuf->waiting = true;
            if(!cyg_drv_cond_wait(&out_cbuf->wait) )
                res = -EINTR;
        }
        cyg_drv_dsr_unlock();
        cyg_drv_mutex_unlock(&out_cbuf->lock);
        break;

    case CYG_IO_GET_CONFIG_SERIAL_INPUT_FLUSH:
        // Flush any buffered input
        if (in_cbuf->len == 0) break; // Nothing to do if not buffered
        cyg_drv_mutex_lock(&in_cbuf->lock);  // Stop any further input processing
        cyg_drv_dsr_lock();
        if (in_cbuf->waiting) {
            // Wake up a blocked reader so it notices the abort.
            in_cbuf->abort = true;
            cyg_drv_cond_signal(&in_cbuf->wait);
            in_cbuf->waiting = false;
        }
        in_cbuf->get = in_cbuf->put = in_cbuf->nb = 0;  // Flush buffered input
        cyg_drv_dsr_unlock();
        cyg_drv_mutex_unlock(&in_cbuf->lock);
        break;

    case CYG_IO_GET_CONFIG_SERIAL_ABORT:
        // Abort any outstanding I/O, including blocked reads
        // Caution - assumed to be called from 'timeout' (i.e. DSR) code,
        // hence no mutex/DSR locking here.
        if (in_cbuf->len != 0) {
            in_cbuf->abort = true;
            cyg_drv_cond_signal(&in_cbuf->wait);
        }
        if (out_cbuf->len != 0) {
            out_cbuf->abort = true;
            cyg_drv_cond_signal(&out_cbuf->wait);
        }
        break;

    case CYG_IO_GET_CONFIG_SERIAL_OUTPUT_FLUSH:
        // Throw away any pending output
        if (out_cbuf->len == 0) break; // Nothing to do if not buffered
        cyg_drv_mutex_lock(&out_cbuf->lock);  // Stop any further output processing
        cyg_drv_dsr_lock();
        if (out_cbuf->nb > 0) {
            out_cbuf->get = out_cbuf->put = out_cbuf->nb = 0;  // Empties queue!
            (funs->stop_xmit)(chan);  // Done with transmit
        }
        if (out_cbuf->waiting) {
            // Release any writer blocked waiting for space.
            out_cbuf->abort = true;
            cyg_drv_cond_signal(&out_cbuf->wait);
            out_cbuf->waiting = false;
        }
        cyg_drv_dsr_unlock();
        cyg_drv_mutex_unlock(&out_cbuf->lock);
        break;

#ifdef CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING
    case CYG_IO_GET_CONFIG_READ_BLOCKING:
        // Report whether reads block (1) or return -EAGAIN (0).
        if (*len < sizeof(cyg_uint32)) {
            return -EINVAL;
        }
        *(cyg_uint32*)xbuf = (in_cbuf->blocking) ? 1 : 0;
        break;

    case CYG_IO_GET_CONFIG_WRITE_BLOCKING:
        // Report whether writes block (1) or return -EAGAIN (0).
        if (*len < sizeof(cyg_uint32)) {
            return -EINVAL;
        }
        *(cyg_uint32*)xbuf = (out_cbuf->blocking) ? 1 : 0;
        break;
#endif // CYGOPT_IO_SERIAL_SUPPORT_NONBLOCKING

    default:
        res = -EINVAL;
    }
    return res;
}
/*
 * dev - device structure (see: io/i2c/i2c.h)
 * send_start - true if we must send start condition (only on first part of data)
 * tx_data - no comments
 * count - number of data in tx_data
 * send_stop - true if there will be no more data to send at this session, we must send stop
 *
 * Returns the number of bytes actually transmitted. The ISR refills the
 * FIFO and the DSR signals 'i2c_wait' on completion or error. The HOLD
 * bit keeps SCL stretched between chunks when the transfer is larger
 * than the FIFO, when no STOP is wanted, or when continuing a previous
 * transfer.
 */
cyg_uint32 zynq_i2c_tx(const cyg_i2c_device* dev,
                       cyg_bool send_start,
                       const cyg_uint8* tx_data,
                       cyg_uint32 count,
                       cyg_bool send_stop)
{
    //get driver private data
    cyg_zynq_i2c_extra* extra = (cyg_zynq_i2c_extra*)dev->i2c_bus->i2c_extra;
    cyg_uint16 ctrl_reg;
    // FIX: removed unused local 'bytes_to_send' and moved 'isr_value'
    // under ZYNQ_I2C_DEBUG — it is only referenced by the debug dump,
    // so it triggered -Wunused-variable in normal builds.
#ifdef ZYNQ_I2C_DEBUG
    cyg_uint16 isr_value;
#endif

    // Stash the transfer parameters where the ISR can find them.
    extra->i2c_addr = dev->i2c_address;
    extra->i2c_bytes_left = count;
    extra->i2c_tx_buf = tx_data;

#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("tx start\n");
#endif

    if(send_start) //init transmission
    {
        HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
        // Clear all except div fields
        ctrl_reg &= 0xff00;
        // clear FIFO, master transmit mode
        ctrl_reg |= ((1 << CR_CLR_FIFO) | (1 << CR_ACKEN) |
                     (1 << CR_NEA) | (1 << CR_MS));
        // if there are more data to send than fifo length
        // or we don't want to send stop
        // or we continue proevious transmission: set HOLD
        if(extra->i2c_bytes_left > XI2CPS_FIFO_DEPTH ||
           !send_stop ||
           (extra->i2c_flag & I2C_FLAG_ACT))
        {
            ctrl_reg |= (1 << CR_HOLD);
            extra->i2c_hold_flag = 1;
        }
        else
        {
            extra->i2c_hold_flag = 0;
        }
        // Write config bits
        HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
        // Feed data to FIFO
        zynq_i2c_feed_data(extra);
        // Write Slave address - generate start condition, tx start
        HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_ADDR_OFFSET,
                         (dev->i2c_address & XI2CPS_ADDR_MASK));
    }
    else //transmission in progress
    {
        // Feed data to FIFO
        zynq_i2c_feed_data(extra);
    }

#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("isr starting\n");
#endif
    //TODO: mutex_lock on bus level performed in io/i2c.cxx
    //TODO: line below to remove?
    cyg_drv_mutex_lock(&extra->i2c_lock);
    cyg_drv_dsr_lock();

    cyg_drv_interrupt_unmask(extra->i2c_isr_vector);
#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("waiting for data transmission...\n");
#endif
    // Sleep until the DSR reports completion or an error.
    while(!(extra->i2c_flag & (I2C_FLAG_FINISH | I2C_FLAG_ERROR)))
    {
#ifdef ZYNQ_I2C_DEBUG
        HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, isr_value);
        diag_printf("CR: %x\n", isr_value);
        HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_SR_OFFSET, isr_value);
        diag_printf("SR: %x\n", isr_value);
        HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_ADDR_OFFSET, isr_value);
        diag_printf("ADDR: %x\n", isr_value);
        HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_ISR_OFFSET, isr_value);
        diag_printf("ISR: %x\n", isr_value);
        HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_IMR_OFFSET, isr_value);
        diag_printf("IMR: %x\n", isr_value);
#endif
        cyg_drv_cond_wait(&extra->i2c_wait);
    }
#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("data transmitted!?\n");
#endif
    cyg_drv_interrupt_mask(extra->i2c_isr_vector);

    cyg_drv_dsr_unlock();
    cyg_drv_mutex_unlock(&extra->i2c_lock);

    if(extra->i2c_flag & I2C_FLAG_ERROR)
    {
#ifdef ZYNQ_I2C_DEBUG
        DPRINTF("TX error extra->i2c_flag = ");
        diag_printf("%x\n", extra->i2c_flag);
#endif
        extra->i2c_flag = 0;
        //TODO: condition for hold_flag?
        // Release SCL so the bus is not left stretched after an error.
        HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
        ctrl_reg &= ~(1 << CR_HOLD);
        HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
    }
    else
    {
        if(send_stop)
        {
            //TODO: condition for hold_flag?
            // Drop HOLD: the controller generates STOP when the FIFO empties.
            HAL_READ_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
            ctrl_reg &= ~(1 << CR_HOLD);
            HAL_WRITE_UINT16(XI2CPS_BASE + XI2CPS_CR_OFFSET, ctrl_reg);
            extra->i2c_flag = 0;
        }
        else
        {
            // Keep the bus claimed for a follow-on transfer.
            extra->i2c_flag = I2C_FLAG_ACT;
        }
    }

    // i2c_bytes_left holds the residue; the difference is what went out.
    count -= extra->i2c_bytes_left;
    extra->i2c_addr = 0;
    extra->i2c_bytes_left = 0;
    extra->i2c_tx_buf = NULL;
#ifdef ZYNQ_I2C_DEBUG
    DPRINTF("tx finished\n");
#endif
    return count;
}