/*
 * IN transfer on EP0 — completion handler for the IN side of a control
 * transfer. Possible sequences this participates in:
 *  1. SETUP IN(0)
 *  2. SETUP IN(0, ep0_zlp_reply) OUT
 *  3. SETUP IN(act=len, zero) OUT
 *  4. SETUP IN(act=len) OUT
 *  5. SETUP IN(act<len) OUT
 *
 * Called when the EP0 IN endpoint (sc->ep[1]) finishes; computes the byte
 * count actually moved and either starts the OUT status stage or completes
 * the request.
 */
static void handle_ep0_in_req(td243fc_rev2_softc_t *sc)
{
    /* EP index 1 is the IN side of EP0 on this controller. */
    td243fc_rev2_ep_t *ep = &sc->ep[1];
    request_t *req = ep->req;

    DBG_V(DSLAVE_DCD, ("DCD: handle_ep0_in_req\n"));

    /* Canceled requests are handled in the abort path, not here. */
    if (req->status == REQUEST_CANCELLED)
        return;

    /* Bytes transferred = requested size minus the residual byte count the
     * controller reports in the EP0/IN packet-control register. */
    req->bytes_transferred = req->transfer_size -
        BFGET4(TD243FC_TTLBTECNT,
               READ4(TD243FC_EPN_PACKET_CONTROL_REG(0, 1)));

    /* Was this the completion of a DATA stage (either a real data payload,
     * or a zero-length reply standing in for one)? The two flags are
     * mutually exclusive in that case — effectively an XOR test. */
    if ((req->transfer_size && !req->ep0_zlp_reply) ||
        (!req->transfer_size && req->ep0_zlp_reply))
    {
        ep->req = NULL;
        /* Data stage done: start the OUT status stage on EP0/OUT (index 0). */
        start_transfer(sc, 0, req);
    }
    else
    {
        /* Status stage (or no-data transfer) finished: complete the request. */
        req_finish(sc, 1, req, REQUEST_COMPLETED);
    }
}
int SPI::transfer(const Buffer& tx, const Buffer& rx, const event_callback_t& callback, int event)
{
    // Peripheral idle: kick the transfer off immediately.
    if (!spi_active(&_spi)) {
        start_transfer(tx, rx, callback, event);
        return 0;
    }
    // Peripheral busy: defer the transfer to the queue instead.
    return queue_transfer(tx, rx, callback, event);
}
int SPI::transfer(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length,
                  unsigned char bit_width, const event_callback_t& callback, int event)
{
    // Start now if the SPI peripheral is idle; otherwise enqueue.
    if (!spi_active(&_spi)) {
        start_transfer(tx_buffer, tx_length, rx_buffer, rx_length,
                       bit_width, callback, event);
        return 0;
    }
    return queue_transfer(tx_buffer, tx_length, rx_buffer, rx_length,
                          bit_width, callback, event);
}
static int do_anyka_read(struct fsg_dev *fsg) { struct fsg_buffhd *bh; int rc; u32 amount_left; unsigned int amount; ssize_t nread; /* Carry out the file reads */ amount_left = fsg->data_size_from_cmnd; if (unlikely(amount_left == 0)) return -EIO; // No default reply for (;;) { /* Figure out how much we need to read: * Try to read the remaining amount. * But don't read more than the buffer size. * And don't try to read past the end of the file. * Finally, if we're not at a page boundary, don't read past * the next page. * If this means reading 0 then we were asked to read past * the end of file. */ amount = min((unsigned int) amount_left, mod_data.buflen); /* Wait for the next buffer to become available */ bh = fsg->next_buffhd_to_fill; while (bh->state != BUF_STATE_EMPTY) { rc = sleep_thread(fsg); if (rc) return rc; } nread = usbburn_read(bh->buf + nread, amount); amount_left -= nread; fsg->residue -= nread; bh->inreq->length = nread; bh->state = BUF_STATE_FULL; if (nread < amount) break; if (amount_left == 0) break; // No more left to read /* Send this buffer and go read some more */ bh->inreq->zero = 0; start_transfer(fsg, fsg->bulk_in, bh->inreq, &bh->inreq_busy, &bh->state); fsg->next_buffhd_to_fill = bh->next; } return -EIO; // No default reply }
/*
 * Callback invoked once an item has been created on the remote server.
 * A NULL import_uri means creation failed and we move on to the next file;
 * otherwise the current file is uploaded to the given URI.
 */
void item_created (const char *import_uri)
{
    if (import_uri != NULL) {
        /* Creation succeeded: push the current file to the server. */
        start_transfer ((char *) files->data,
                        import_uri,
                        cds_proxy,
                        upnp_context);
        return;
    }

    /* Item creation failed, lets try next file */
    goto_next_file ();
}
/* NOTES:
 * - EP0 is automatically queued by the DCD and passed to core_handle_ep0()
 * - The DCD handles SET_ADDRESS and CLEAR_FEATURE(EP_STALL) itself
 *
 * Resolve the endpoint index for a core I/O request and hand it to
 * start_transfer(). With no pipe descriptor the request targets EP0:
 * index 1 for IN, index 0 for OUT.
 */
static jresult_t dcd_send_io_request(jdevice_t dev, pipe_desc_t *pipe,
    request_t *core_req)
{
    td243fc_rev2_softc_t *sc = (td243fc_rev2_softc_t *)j_device_get_softc(dev);
    jint_t ep_n;

    if (pipe != NULL)
        ep_n = (jint_t)(pipe->dcd_handle);
    else if (core_req->direction == DIRECTION_IN)
        ep_n = 1;
    else
        ep_n = 0;

    return start_transfer(sc, ep_n, core_req);
}
/*
 * HPDMA transfer-complete interrupt handler.
 *
 * Acknowledges the completed descriptor, advances the descriptor ring, and
 * either chains the next chunk of the transfer or signals completion to the
 * registered handler.
 *
 * NOTE(review): the trailing #endif below closes a conditional signature
 * selection that starts before this view — confirm against the full file.
 */
void HPDMA_Complete_IRQHandler(void)
#endif
{
    /* Clear interrupt for the descriptor that just finished. */
    HPDMA_BITBAND->HPDMAICR_CLR_XFR_INT[g_hpdma_current_desc] = HPDMA_ENABLE;

    /* Disable this descriptor's interrupts before re-use. */
    HPDMA->Descriptor[g_hpdma_current_desc].HPDMACR_REG &= ~HPDMACR_ALL_IRQ_ENABLE_MASK;

    /* Advance to the next descriptor, wrapping the ring. */
    ++g_hpdma_current_desc;
    if(NO_OF_HPDMA_DESCRIPTORS == g_hpdma_current_desc)
    {
        g_hpdma_current_desc = 0u;
    }

    if(g_transfer.xfr_size > 0u)
    {
        /* More data remains: wrap or advance the tail descriptor and chain
         * the next chunk. Presumably start_transfer() consumes part of
         * g_transfer.xfr_size and advances the addresses — confirm in its
         * definition. */
        if(NO_OF_HPDMA_DESCRIPTORS == g_hpdma_end_desc)
        {
            g_hpdma_end_desc = 0u;
        }
        else
        {
            ++g_hpdma_end_desc;
        }
        start_transfer(g_transfer.src_addr,
                       g_transfer.des_addr,
                       g_transfer.xfr_size,
                       g_transfer.xfr_dir);
    }
    else
    {
        /* Nothing left to queue. Only declare completion once the current
         * descriptor has no interrupts enabled, i.e. no chunk is still in
         * flight on it. */
        if(((HPDMA->Descriptor[g_hpdma_current_desc].HPDMACR_REG) & HPDMACR_ALL_IRQ_ENABLE_MASK) == 0u)
        {
            g_hpdma_current_desc = 0u;
            g_hpdma_end_desc = 0u;
            g_transfer.state = HPDMA_COMPLETED;
            /* Notify the client, if a completion handler was registered. */
            if(g_transfer.completion_handler != NULL_HANDLER)
            {
                (*(g_transfer.completion_handler))(HPDMA_COMPLETED);
            }
        }
    }
}
/*-------------------------------------------------------------------------*//**
 * See "mss_hpdma.h" for details of how to use this function.
 *
 * Kick off an HPDMA transfer of transfer_size bytes from src_addr to
 * dest_addr. Both addresses must be word-aligned and no transfer may be in
 * progress. The transfer is split into MAX_SIZE_PER_DMA_XFR-sized chunks;
 * up to NO_OF_HPDMA_DESCRIPTORS chunks are queued immediately, the rest are
 * chained from the completion interrupt handler.
 */
void MSS_HPDMA_start
(
    uint32_t src_addr,
    uint32_t dest_addr,
    uint32_t transfer_size,
    uint8_t transfer_dir
)
{
    uint32_t hpdma_total_desc = 0u;

    /* Debug-build contract checks: word alignment, idle controller,
     * non-empty transfer. The runtime 'if' below enforces the same. */
    ASSERT((src_addr & ADDRESS_NON_WORD_ALIGNED_MASK) == 0u);
    ASSERT((dest_addr & ADDRESS_NON_WORD_ALIGNED_MASK) == 0u);
    ASSERT((g_transfer.state != HPDMA_IN_PROGRESS) && (transfer_size > 0u));

    /* Check transfer_size is not zero and that a transfer is not in progress */
    if((g_transfer.state != HPDMA_IN_PROGRESS) && (transfer_size > 0u))
    {
        NVIC_EnableIRQ(HPDMA_Complete_IRQn);
        NVIC_EnableIRQ(HPDMA_Error_IRQn);

        /* Record the transfer request in the driver-global state consumed
         * by start_transfer() and the interrupt handlers. */
        g_transfer.xfr_size = transfer_size;
        g_transfer.des_addr = dest_addr;
        g_transfer.src_addr = src_addr;
        g_transfer.xfr_dir = transfer_dir;

        /* Total no of descriptors used for transfer (ceiling division). */
        hpdma_total_desc = ((transfer_size - 1u) / MAX_SIZE_PER_DMA_XFR) + 1u;

        /* Current HPDMA descriptor */
        g_hpdma_current_desc = 0u;
        g_hpdma_end_desc = 0u;

        /* Start transfer: queue as many chunks as there are descriptors.
         * NOTE(review): each iteration passes the full g_transfer values —
         * presumably start_transfer() decrements xfr_size and advances the
         * addresses as it programs each descriptor; confirm there. */
        g_transfer.state = HPDMA_IN_PROGRESS;
        while((g_hpdma_end_desc < hpdma_total_desc) &&
              (g_hpdma_end_desc < NO_OF_HPDMA_DESCRIPTORS))
        {
            start_transfer(g_transfer.src_addr,
                           g_transfer.des_addr,
                           g_transfer.xfr_size,
                           g_transfer.xfr_dir);
            ++g_hpdma_end_desc;
        }
    }
}
/* Do NUM_TRANSFERS DMA transfers.
 *
 * Starts the first transfer, then polls for up to one second, chaining a
 * new transfer from start_another_transfer() each time the completion IRQ
 * fires. If the full set does not finish in time, the channel is stopped.
 */
static void do_transfer(dma_channel_desc_t *p_chan_desc)
{
#if (QM_SENSOR)
	uint32_t start_ticks;
#else
	uint64_t start_ticks;
#endif
	int rc;

	QM_PUTS("Starting the transfer and waiting for 1 second.");

	/* Record the system tick time at which we started. */
	start_ticks = get_ticks();

	/* Kick off the first transfer. */
	start_transfer(p_chan_desc, (uint32_t *)tx_data,
		       (uint32_t *)rx_data[transfer_count], strlen(tx_data));

	/* Poll until one second elapses or all transfers are done; each
	 * completion IRQ sets irq_fired and we chain the next transfer. */
	while (((get_ticks() - start_ticks) < SYS_TICKS_PER_S_32MHZ) &&
	       (transfer_count < NUM_TRANSFERS)) {
		if (irq_fired) {
			irq_fired = false;
			start_another_transfer(p_chan_desc);
		}
	}

	/* Timed out before finishing: stop the channel. */
	if (transfer_count < NUM_TRANSFERS) {
		rc = qm_dma_transfer_terminate(p_chan_desc->controller_id,
					       p_chan_desc->channel_id);
		if (rc) {
			QM_PUTS("ERROR: qm_dma_transfer_stop");
		}
	}
}
/* Check last error code and start a new transfer.
 *
 * If the previous transfer ended with an error, report it and stop
 * chaining; otherwise log the completion, bump transfer_count and, while
 * more transfers remain, start the next one.
 */
void start_another_transfer(dma_channel_desc_t *p_chan_desc)
{
	/* A non-zero code means the last transfer failed — do not requeue. */
	if (irq_error_code) {
		QM_PRINTF("Error: Transfer with error Code: %u\n",
			  irq_error_code);
		return;
	}

	QM_PRINTF("Transfer Loop %d Complete with Data Length: %u\n",
		  transfer_count, irq_len);
	transfer_count++;

	/* Start a new transfer. */
	if (transfer_count < NUM_TRANSFERS) {
		start_transfer(p_chan_desc, (uint32_t *)tx_data,
			       (uint32_t *)rx_data[transfer_count],
			       strlen(tx_data));
	}
}
/*
 * Receive 'size' bytes from the host over bulk-out and copy them into
 * 'data'.
 *
 * Classic fsg producer/consumer loop over the buffer-head ring: the
 * producer side queues OUT requests while get_some_more is set; the
 * consumer side drains FULL buffers into data+offset. Exits on transfer
 * error, a short packet from the host, a pending signal, or when
 * everything has been written.
 *
 * Returns -EIO on normal/error exit ("no default reply"), -EINTR if a
 * signal arrived, or sleep_thread()'s error.
 */
static int utp_do_write(struct fsg_dev *fsg, void *data, size_t size)
{
    struct fsg_buffhd *bh;
    int get_some_more;
    u32 amount_left_to_req, amount_left_to_write;
    unsigned int amount;
    int rc;
    loff_t offset;

    /* Carry out the file writes */
    get_some_more = 1;
    amount_left_to_req = amount_left_to_write = size;
    if (unlikely(amount_left_to_write == 0))
        return -EIO;

    offset = 0;
    while (amount_left_to_write > 0) {

        /* Queue a request for more data from the host */
        bh = fsg->next_buffhd_to_fill;
        if (bh->state == BUF_STATE_EMPTY && get_some_more) {

            /* Figure out how much we want to get:
             * Try to get the remaining amount.
             * But don't get more than the buffer size.
             * And don't try to go past the end of the file.
             * If we're not at a page boundary,
             * don't go past the next page.
             * If this means getting 0, then we were asked
             * to write past the end of file.
             * Finally, round down to a block boundary. */
            amount = min(amount_left_to_req, mod_data.buflen);

            if (amount == 0) {
                get_some_more = 0;  /* cry now */
                continue;
            }

            /* Get the next buffer */
            amount_left_to_req -= amount;
            if (amount_left_to_req == 0)
                get_some_more = 0;

            /* amount is always divisible by 512, hence by
             * the bulk-out maxpacket size */
            bh->outreq->length = bh->bulk_out_intended_length = amount;
            bh->outreq->short_not_ok = 1;
            start_transfer(fsg, fsg->bulk_out, bh->outreq,
                           &bh->outreq_busy, &bh->state);
            fsg->next_buffhd_to_fill = bh->next;
            continue;
        }

        /* Write the received data to the backing file */
        bh = fsg->next_buffhd_to_drain;
        if (bh->state == BUF_STATE_EMPTY && !get_some_more)
            break;  /* We stopped early */
        if (bh->state == BUF_STATE_FULL) {
            /* Pair with the memory barrier in the completion handler so
             * the buffer contents are visible before we read them. */
            smp_rmb();
            fsg->next_buffhd_to_drain = bh->next;
            bh->state = BUF_STATE_EMPTY;

            /* Did something go wrong with the transfer? */
            if (bh->outreq->status != 0)
                /* cry again, COMMUNICATION_FAILURE */
                break;

            amount = bh->outreq->actual;

            /* Perform the write */
            memcpy(data + offset, bh->buf, amount);
            offset += amount;
            if (signal_pending(current))
                return -EINTR;  /* Interrupted! */
            amount_left_to_write -= amount;
            fsg->residue -= amount;

            /* Did the host decide to stop early? */
            if (bh->outreq->actual != bh->outreq->length) {
                fsg->short_packet_received = 1;
                break;
            }
            continue;
        }

        /* Wait for something to happen */
        rc = sleep_thread(fsg);
        if (rc)
            return rc;
    }

    return -EIO;
}
/*
 * Send 'size' bytes from 'data' to the host over the bulk-in endpoint.
 *
 * Copies the payload into buffer heads one mod_data.buflen chunk at a time
 * and queues each with start_transfer(). A zero-sized chunk produces an
 * empty FULL buffer and ends the loop.
 *
 * Returns the number of bytes queued (size - amount_left), -EIO when size
 * is 0, or sleep_thread()'s error if interrupted while waiting for a
 * buffer.
 */
static int utp_do_read(struct fsg_dev *fsg, void *data, size_t size)
{
    struct fsg_buffhd *bh;
    int rc;
    u32 amount_left;
    unsigned int amount;

    /* Get the starting Logical Block Address and check that it's
     * not too big */
    amount_left = size;
    if (unlikely(amount_left == 0))
        return -EIO;    /* No default reply */

    /* BUG FIX: 'size' is size_t — printing it with %d is undefined
     * behavior; use %zu (likewise for the derived expressions below). */
    pr_debug("%s: sending %zu\n", __func__, size);

    for (;;) {
        /* Figure out how much we need to read:
         * Try to read the remaining amount, but don't read more than
         * the buffer size. If this means reading 0 then we were asked
         * to read past the end. */
        amount = min((unsigned int) amount_left, mod_data.buflen);

        /* Wait for the next buffer to become available */
        bh = fsg->next_buffhd_to_fill;
        while (bh->state != BUF_STATE_EMPTY) {
            rc = sleep_thread(fsg);
            if (rc)
                return rc;
        }

        /* If we were asked to read past the end of file,
         * end with an empty buffer. */
        if (amount == 0) {
            bh->inreq->length = 0;
            bh->state = BUF_STATE_FULL;
            break;
        }

        /* Perform the read.
         * BUG FIX: 'amount' is unsigned int and 'size - amount_left' is
         * size_t; the original %d specifiers were mismatched (UB). */
        pr_info("Copied to %p, %u bytes started from %zu\n",
                bh->buf, amount, size - amount_left);

        /* from utp buffer to file_storage buffer */
        memcpy(bh->buf, data + size - amount_left, amount);
        amount_left -= amount;
        fsg->residue -= amount;
        bh->inreq->length = amount;
        bh->state = BUF_STATE_FULL;

        /* Send this buffer and go read some more */
        bh->inreq->zero = 0;

        /* USB physical transfer: data from device to host */
        start_transfer(fsg, fsg->bulk_in, bh->inreq,
                       &bh->inreq_busy, &bh->state);

        fsg->next_buffhd_to_fill = bh->next;

        /* amount_left is unsigned, so <= 0 really means == 0 */
        if (amount_left == 0)
            break;
    }

    return size - amount_left;
}
// Dequeue helper: unpack a stored transaction and hand every field to
// start_transfer() unchanged.
void SPI::start_transaction(transaction_t *data)
{
    transaction_t &t = *data;
    start_transfer(t.tx_buffer,
                   t.tx_length,
                   t.rx_buffer,
                   t.rx_length,
                   t.width,
                   t.callback,
                   t.event);
}
/*
 * Receive fsg->data_size_from_cmnd bytes from the host over bulk-out and
 * push each drained buffer to usbburn_write().
 *
 * Same producer/consumer shape as the fsg write path: queue OUT requests
 * while get_some_more is set, then drain FULL buffers into the burn
 * backend. Exits on transfer error, short packet, or completion.
 *
 * Returns -EIO on exit ("no default reply") or sleep_thread()'s error.
 */
static int do_anyka_write(struct fsg_dev *fsg)
{
    struct lun *curlun = fsg->curlun;
    struct fsg_buffhd *bh;
    int get_some_more;
    u32 amount_left_to_req, amount_left_to_write;
    loff_t file_offset;
    unsigned int amount;
    ssize_t nwritten;
    int rc;

    /* Carry out the file writes */
    get_some_more = 1;
    file_offset = 0;
    amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;

    while (amount_left_to_write > 0) {

        /* Queue a request for more data from the host */
        bh = fsg->next_buffhd_to_fill;
        if (bh->state == BUF_STATE_EMPTY && get_some_more) {
            amount = min(amount_left_to_req, mod_data.buflen);

            /* Get the next buffer */
            fsg->usb_amount_left -= amount;
            amount_left_to_req -= amount;
            if (amount_left_to_req == 0)
                get_some_more = 0;

            /* amount is always divisible by 512, hence by
             * the bulk-out maxpacket size */
            bh->outreq->length = bh->bulk_out_intended_length = amount;
            bh->outreq->short_not_ok = 1;
            start_transfer(fsg, fsg->bulk_out, bh->outreq,
                           &bh->outreq_busy, &bh->state);
            fsg->next_buffhd_to_fill = bh->next;
            continue;
        }

        /* Write the received data to the backing file */
        bh = fsg->next_buffhd_to_drain;
        if (bh->state == BUF_STATE_EMPTY && !get_some_more)
            break;  // We stopped early
        if (bh->state == BUF_STATE_FULL) {
            /* Pair with the completion handler's barrier before reading
             * the buffer contents. */
            smp_rmb();
            fsg->next_buffhd_to_drain = bh->next;
            bh->state = BUF_STATE_EMPTY;

            /* Did something go wrong with the transfer? */
            if (bh->outreq->status != 0) {
                curlun->sense_data = SS_COMMUNICATION_FAILURE;
                /* Sense info deliberately not recorded here: */
                // curlun->sense_data_info = file_offset >> 9;
                curlun->info_valid = 1;
                break;
            }
            amount = bh->outreq->actual;
            /* Clamp a write that would run past the device's end. */
            if (fsg->data_size_from_cmnd - file_offset < amount) {
                LERROR(curlun, "write %u @ %llu beyond end %llu\n",
                       amount, (unsigned long long) file_offset,
                       (unsigned long long) curlun->file_length);
                amount = curlun->file_length - file_offset;
            }

            /* Perform the write.
             * NOTE(review): nwritten is always 0 at this point, so
             * bh->buf + nwritten is just bh->buf — confirm that is the
             * intent before simplifying. */
            nwritten = 0;
            nwritten = usbburn_write(bh->buf + nwritten, amount);
            file_offset += nwritten;
            amount_left_to_write -= nwritten;
            fsg->residue -= nwritten;

            /* Did the host decide to stop early? */
            if (bh->outreq->actual != bh->outreq->length) {
                fsg->short_packet_received = 1;
                break;
            }
            continue;
        }

        /* Wait for something to happen */
        rc = sleep_thread(fsg);
        if (rc)
            return rc;
    }

    return -EIO;    // No default reply
}
/*
 * Run one polled step of a SPI transfer on spi_bus.
 *
 * On the first call of a transfer (txcnt == rxcnt == 0) the controller is
 * (re)configured: FIFO status cleared, RX/TX enabled, 8-bit word length,
 * clock tap delay and CS setup/hold times programmed. Each step then tops
 * up the TX FIFO (padding with zeros once txbuf is exhausted), starts the
 * hardware transfer, and drains the RX FIFO into rxbuf one byte per word.
 *
 * Returns 0 when the transfer is complete (callback, if any, has been
 * invoked) or a FIFO error occurred; returns 1 when more steps are needed.
 */
int transfer_data(spi_bus_t* spi_bus)
{
    volatile uint32_t stat, rxfifo_cnt;
    volatile uint32_t tmpdin;
    volatile uint32_t *rx_fifo_p;
    int txfifo_space;
    static int first_time = 1;

    spi_debug_transfer(PRINT_FUNC_NAME
        "transfer started. Value=%08x, fifo_status = %08x\n",
        __func__, *spi_bus->txbuf, spi_bus->regs->fifo_status);

    rx_fifo_p = &spi_bus->regs->rx_fifo;

    /* First step of a new transfer: initialize the controller. */
    if ((spi_bus->txcnt == 0) && (spi_bus->rxcnt == 0)) {
        /* Write the status register back to itself to clear sticky bits. */
        stat = spi_bus->regs->fifo_status;
        spi_bus->regs->fifo_status = stat;

        /* Enable RX/TX, least-significant-byte-first, select CS0. */
        clrsetbits(spi_bus->regs->command1, SPI_CMD1_CS_SW_VAL,
                   SPI_CMD1_RX_EN | SPI_CMD1_TX_EN | SPI_CMD1_LSBY_FE |
                   (SPI_CS_0 << SPI_CMD1_CS_SEL_SHIFT));
        /* 8-bit words (register field holds length - 1). */
        clrsetbits(spi_bus->regs->command1,
                   SPI_CMD1_BIT_LEN_MASK << SPI_CMD1_BIT_LEN_SHIFT,
                   (8 - 1) << SPI_CMD1_BIT_LEN_SHIFT);
        setbits(spi_bus->regs->xfer_status, SPI_XFER_STS_RDY);
        first_time = 1;

        /* RX clock tap delay and CS timing. */
        setbits(spi_bus->regs->command2,
                (DARPA_CLK_TAP_DELAY & SPI_CMD2_RX_CLK_TAP_DELAY_MASK));
        setbits(spi_bus->regs->timing1,
                (DARPA_CS_SETUP_TIME << CS_SETUP_TIME_0_SHIFT));
        setbits(spi_bus->regs->timing1,
                (DARPA_CS_HOLD_TIME << CS_HOLD_TIME_0_SHIFT));
    }
    dump_regs();

    txfifo_space = SPI_TX_FIFO_EMPTY_COUNT(spi_bus->regs->fifo_status);
    spi_debug("%s, line: %d, txsize: %d, rxsize: %d, rxcnt: %d, txcnt: %d\n",
              __func__, __LINE__, spi_bus->txsize, spi_bus->rxsize,
              spi_bus->rxcnt, spi_bus->txcnt);
    spi_debug("%s, line: %d, txfifo_space: %d\n", __func__, __LINE__,
              txfifo_space);

    /* Clamp the FIFO top-up to what the transfer still needs.
     * NOTE(review): the condition tests against rxcnt but the clamp uses
     * txcnt — looks asymmetric; confirm which counter is intended. */
    if (txfifo_space > ((int)(spi_bus->txsize + spi_bus->rxsize - spi_bus->rxcnt))) {
        //NOT, NOT! Whos there? One. One who? One or zero..........
        txfifo_space = MAX((int)(spi_bus->txsize + spi_bus->rxsize - spi_bus->txcnt), 0);
    }
    spi_debug("%s %d\n", __func__, __LINE__);

    if ((spi_bus->regs->xfer_status & SPI_XFER_STS_RDY) || first_time) {
        /* BUG FIX: the original call passed no arguments for the %s/%d
         * conversions — undefined behavior. Supply them. */
        spi_debug("\n%s, line: %d, setting block size\n", __func__, __LINE__);
        spi_bus->regs->dma_blk = txfifo_space - 1;
        first_time = 0;
    }

    if (txfifo_space > DARPA_FIFO_MAX) {
        txfifo_space = DARPA_FIFO_MAX;
    }
    spi_debug("%s, line: %d, txfifo_space: %d\n \n", __func__, __LINE__,
              txfifo_space);
    dump_regs();

    /* Fill the TX FIFO; pad with zeros once the tx buffer is exhausted so
     * the clock keeps running for the RX phase. */
    while (txfifo_space--) {
        spi_debug_data("%s, line: %d, txbuf: 0x%x txfifo_space: %d\n",
                       __func__, __LINE__, *spi_bus->txbuf, txfifo_space);
        spi_bus->bytes = 1;
        dump_regs();
        spi_debug_data("%s, line: %d, txbuf: 0x%x\n", __func__, __LINE__,
                       *spi_bus->txbuf);
        dump_regs();
        if (spi_bus->txcnt < spi_bus->txsize) {
            spi_bus->regs->tx_fifo = *spi_bus->txbuf;
            spi_bus->txbuf++;
        } else {
            spi_bus->regs->tx_fifo = 0;
        }
        spi_bus->txcnt++;
    }

    /* Kick the hardware if there is anything queued to send. */
    if (!(spi_bus->regs->fifo_status & SPI_FIFO_STS_TX_FIFO_EMPTY)) {
        start_transfer(spi_bus);
    }

    stat = spi_bus->regs->fifo_status;
    rxfifo_cnt = SPI_RX_FIFO_FULL_COUNT(stat);
    spi_debug("%s, line: %d, rxfifo_cnt: %d\n", __func__, __LINE__,
              rxfifo_cnt);
    dump_regs();

    /* Drain the RX FIFO, one byte of each received word into rxbuf. */
    while (rxfifo_cnt--) {
        uint32_t fifo_status, xfer_status;

        xfer_status = spi_bus->regs->xfer_status;
        if (!(xfer_status & SPI_XFER_STS_RDY)) {
            continue;
        }
        fifo_status = spi_bus->regs->fifo_status;
        if (fifo_status & SPI_FIFO_STS_ERR) {
            spi_debug_error("%s: got a fifo error: ", __func__);
            print_fifo_error(fifo_status);
            /* BUG FIX: the 'break' that followed this return was
             * unreachable and has been removed. */
            return 0;
        }
        spi_debug("%s, line: %d, rxfifo_cnt: %d rxfifo_cnt local: %d\n",
                  __func__, __LINE__,
                  SPI_RX_FIFO_FULL_COUNT(spi_bus->regs->fifo_status),
                  rxfifo_cnt);
        /* Compiler barrier: force the FIFO read to happen here. */
        asm volatile("":::"memory");
        tmpdin = *rx_fifo_p;
        spi_debug("%s, line: %d, rxfifo_cnt: %d, bytes: %u\n", __func__,
                  __LINE__, SPI_RX_FIFO_FULL_COUNT(stat), spi_bus->bytes);
        if (NULL != spi_bus->rxbuf) {
            spi_debug_rx("%s, line: %d, rxdata: 0x%08x\n", __func__,
                         __LINE__, (uint32_t)tmpdin);
            *spi_bus->rxbuf++ = (uint32_t)tmpdin & 0xff;
            spi_debug("%s, line: %d bytes: %d \n", __func__, __LINE__,
                      spi_bus->bytes);
        }
        spi_bus->rxcnt += spi_bus->bytes;
    }
    dump_regs();

    spi_debug_transfer("%s, line: %d, rxcnt: %d, rxtotal: %d, txtotal: %d\n",
                       __func__, __LINE__, spi_bus->rxcnt, spi_bus->rxtotal,
                       spi_bus->txtotal);

    /* Every word (tx + rx) has been clocked through: finish up. */
    if (spi_bus->rxcnt == spi_bus->rxtotal + spi_bus->txtotal) {
        setbits(spi_bus->regs->dma_ctl, (SPI_IE_TX | SPI_IE_RX));
        setbits(spi_bus->regs->xfer_status, SPI_XFER_STS_RDY);
        spi_debug("%s: transfer ended. Value=%08x, fifo_status = %08x\n",
                  __func__, tmpdin, spi_bus->regs->fifo_status);
        if (spi_bus->cb) {
            spi_debug("%s: line: %d calling call back\n", __func__, __LINE__);
            spi_bus->cb(spi_bus, spi_bus->txcnt, spi_bus->token);
        }
        spi_debug_transfer("%s: line: %d transfer complete\n", __func__,
                           __LINE__);
        return 0;
    }

    spi_debug_transfer("%s: line: %d transfer incomplete\n", __func__,
                       __LINE__);
    return 1;
}