/**
 * \brief Handles reception of a SETUP packet on the control endpoint (EP 0).
 *
 * Resets any stale control state, validates and decodes the 8-byte SETUP
 * packet into \c udd_g_ctrlreq, dispatches it to the UDC layer via
 * udc_process_setup(), then starts the matching data/handshake phase
 * (IN data, OUT data, or IN ZLP). Unknown or malformed requests are STALLed.
 *
 * NOTE(review): called from interrupt context — the explicit
 * cpu_irq_save()/restore critical section around the NAK-IN interrupt enable
 * suggests nesting/priority concerns; confirm against the driver's ISR model.
 */
static void udd_ctrl_setup_received(void)
{
	irqflags_t flags;

	if (UDD_EPCTRL_SETUP != udd_ep_control_state) {
		// A new SETUP arrived while a previous request was still in
		// progress: may be a hidden DATA or ZLP phase, or a protocol
		// abort. Terminate the previous request first.
		udd_ctrl_endofrequest();
		// Reinitializes control endpoint management
		udd_ctrl_init();
	}

	// A SETUP packet is always exactly 8 bytes; anything else is an error.
	if (8 != udd_udesc_get_buf0_ctn(0)) {
		udd_ctrl_stall_data();
		udd_ack_setup_received(0);
		return; // Error: data count doesn't correspond to a SETUP packet
	}
	// Fill setup request structure from the hardware buffer
	memcpy((uint8_t *) & udd_g_ctrlreq.req, udd_ctrl_buffer, 8);

	// Manage LSB/MSB to fit with CPU usage (USB wire format is little-endian)
	udd_g_ctrlreq.req.wValue = le16_to_cpu(udd_g_ctrlreq.req.wValue);
	udd_g_ctrlreq.req.wIndex = le16_to_cpu(udd_g_ctrlreq.req.wIndex);
	udd_g_ctrlreq.req.wLength = le16_to_cpu(udd_g_ctrlreq.req.wLength);

	// Decode setup request
	if (udc_process_setup() == false) {
		// Setup request unknown, then stall it
		udd_ctrl_stall_data();
		udd_ack_setup_received(0);
		return;
	}
	udd_ack_setup_received(0);

	if (Udd_setup_is_in()) {
		// IN data phase requested
		udd_ctrl_prev_payload_nb_trans = 0;
		udd_ctrl_payload_nb_trans = 0;
		udd_ep_control_state = UDD_EPCTRL_DATA_IN;
		udd_ctrl_in_sent(); // Send first data transfer
	} else {
		if (0 == udd_g_ctrlreq.req.wLength) {
			// No data phase requested
			// Send IN ZLP to ACK setup request
			udd_ctrl_send_zlp_in();
			return;
		}
		// OUT data phase requested
		udd_ctrl_prev_payload_nb_trans = 0;
		udd_ctrl_payload_nb_trans = 0;
		udd_ep_control_state = UDD_EPCTRL_DATA_OUT;
		// To detect a protocol error, enable NAK interrupt on data IN phase
		udd_ack_nak_in(0);
		flags = cpu_irq_save();
		udd_enable_nak_in_interrupt(0);
		cpu_irq_restore(flags);
	}
}
/**
 * \brief Handles a transfer-complete event on a non-control endpoint.
 *
 * For IN endpoints: accounts the bytes just sent and, if the job still has
 * data (or a final ZLP) pending, programs the descriptor for the next chunk
 * and re-enables the IN interrupt. For OUT endpoints: accounts the bytes
 * received (copying from the cache buffer when it was used), and if more
 * data is expected, programs the next reception. When the job is finished,
 * clears \c busy and invokes the user callback with UDD_EP_TRANSFER_OK.
 *
 * \param ep Endpoint id, possibly including the direction bit
 *           (USB_EP_DIR_IN); masked with USB_EP_ADDR_MASK for indexing.
 */
static void udd_ep_trans_done(udd_ep_id_t ep)
{
	udd_ep_job_t *ptr_job;
	uint16_t ep_size, nb_trans;
	uint16_t next_trans;
	udd_ep_id_t ep_num;
	irqflags_t flags;

	ep_num = ep & USB_EP_ADDR_MASK;
	ep_size = udd_get_endpoint_size(ep_num);
	// Get job corresponding at endpoint (job array is 1-based on ep number)
	ptr_job = &udd_ep_job[ep_num - 1];

	// Disable interrupt of endpoint
	flags = cpu_irq_save();
	udd_disable_endpoint_interrupt(ep_num);
	cpu_irq_restore(flags);

	if (!ptr_job->busy) {
		return; // No job is running, then ignore it (system error)
	}

	if (USB_EP_DIR_IN == (ep & USB_EP_DIR_IN)) {
		// Transfer complete on IN
		nb_trans = udd_udesc_get_buf0_size(ep_num);

		// Lock emission of new IN packet
		udd_enable_busy_bank0(ep_num);
		// Ack interrupt
		udd_ack_in_send(ep_num);

		if (0 == nb_trans) {
			if (0 == udd_nb_busy_bank(ep_num)) {
				// All bytes are transfered, then take nb bytes requested
				nb_trans = udd_udesc_get_buf0_ctn(ep_num);
			}
		}
		// Update number of data transfered
		ptr_job->nb_trans += nb_trans;

		// Need to send other data
		if ((ptr_job->nb_trans != ptr_job->buf_size)
				|| ptr_job->b_shortpacket) {
			next_trans = ptr_job->buf_size - ptr_job->nb_trans;
			if (UDD_ENDPOINT_MAX_TRANS < next_trans) {
				// The USB hardware supports a maximum transfer size
				// of UDD_ENDPOINT_MAX_TRANS bytes; round down to a
				// whole number of max-packet-size packets.
				next_trans = UDD_ENDPOINT_MAX_TRANS -
						(UDD_ENDPOINT_MAX_TRANS % ep_size);
				udd_udesc_set_buf0_autozlp(ep_num, false);
			} else {
				// Need ZLP, if requested and last packet is not a short packet
				udd_udesc_set_buf0_autozlp(ep_num, ptr_job->b_shortpacket);
				ptr_job->b_shortpacket = false; // No need to request another ZLP
			}
			udd_udesc_set_buf0_ctn(ep_num, next_trans);
			udd_udesc_rst_buf0_size(ep_num);
			// Link the user buffer directly on USB hardware DMA
			udd_udesc_set_buf0_addr(ep_num, &ptr_job->buf[ptr_job->nb_trans]);
			// Start transfer
			udd_ack_fifocon(ep_num);
			udd_disable_busy_bank0(ep_num);
			// Enable interrupt
			flags = cpu_irq_save();
			udd_enable_in_send_interrupt(ep_num);
			udd_enable_endpoint_interrupt(ep_num);
			cpu_irq_restore(flags);
			return;
		}
	} else {
		// Transfer complete on OUT
		nb_trans = udd_udesc_get_buf0_ctn(ep_num);

		// Lock reception of new OUT packet
		udd_enable_busy_bank0(ep_num);
		// Ack interrupt
		udd_ack_out_received(ep_num);
		udd_ack_fifocon(ep_num);

		// Can be necessary to copy data received from cache buffer to user buffer
		if (ptr_job->b_use_out_cache_buffer) {
			memcpy(&ptr_job->buf[ptr_job->nb_trans],
					udd_ep_out_cache_buffer[ep_num - 1],
					ptr_job->buf_size % ep_size);
		}

		// Update number of data transfered; clamp to the user buffer size
		ptr_job->nb_trans += nb_trans;
		if (ptr_job->nb_trans > ptr_job->buf_size) {
			ptr_job->nb_trans = ptr_job->buf_size;
		}

		// If all previous data requested are received and user buffer not full
		// then need to receive other data
		if ((nb_trans == udd_udesc_get_buf0_size(ep_num))
				&& (ptr_job->nb_trans != ptr_job->buf_size)) {
			next_trans = ptr_job->buf_size - ptr_job->nb_trans;
			if (UDD_ENDPOINT_MAX_TRANS < next_trans) {
				// The USB hardware supports a maximum transfer size
				// of UDD_ENDPOINT_MAX_TRANS bytes
				next_trans = UDD_ENDPOINT_MAX_TRANS -
						(UDD_ENDPOINT_MAX_TRANS % ep_size);
			} else {
				// Receive only whole packets into the user buffer
				next_trans -= next_trans % ep_size;
			}
			udd_udesc_rst_buf0_ctn(ep_num);
			if (next_trans < ep_size) {
				// Remaining space is smaller than one packet: use the
				// cache buffer so a full-size packet cannot overrun the
				// user buffer (Bulk or Interrupt endpoint).
				ptr_job->b_use_out_cache_buffer = true;
				udd_udesc_set_buf0_addr(ep_num,
						udd_ep_out_cache_buffer[ep_num - 1]);
				udd_udesc_set_buf0_size(ep_num, ep_size);
			} else {
				// Link the user buffer directly on USB hardware DMA
				udd_udesc_set_buf0_addr(ep_num, &ptr_job->buf[ptr_job->nb_trans]);
				udd_udesc_set_buf0_size(ep_num, next_trans);
			}
			// Start transfer
			udd_disable_busy_bank0(ep_num);
			// Enable interrupt
			flags = cpu_irq_save();
			udd_enable_out_received_interrupt(ep_num);
			udd_enable_endpoint_interrupt(ep_num);
			cpu_irq_restore(flags);
			return;
		}
	}
	// Job complete, then call callback
	ptr_job->busy = false;
	if (NULL != ptr_job->call_trans) {
		ptr_job->call_trans(UDD_EP_TRANSFER_OK, ptr_job->nb_trans, ep);
	}
	return;
}
/**
 * \brief Handles an OUT packet received on the control endpoint (EP 0).
 *
 * Outside the DATA-OUT state, interprets the packet as either the status
 * ZLP OUT ending a request (normal end) or a protocol error (STALL).
 * In the DATA-OUT state, copies the received bytes into the request payload
 * buffer, detects end of the data phase (short packet or wLength reached)
 * and then sends the IN ZLP handshake, or — when the payload buffer is full
 * mid-transfer — asks the upper layer for a new buffer via the
 * \c over_under_run callback, STALLing if none is provided.
 */
static void udd_ctrl_out_received(void)
{
	irqflags_t flags;
	uint16_t nb_data;

	if (UDD_EPCTRL_DATA_OUT != udd_ep_control_state) {
		if ((UDD_EPCTRL_DATA_IN == udd_ep_control_state)
				|| (UDD_EPCTRL_HANDSHAKE_WAIT_OUT_ZLP == udd_ep_control_state)) {
			// End of SETUP request:
			// - Data IN Phase aborted,
			// - or last Data IN Phase hidden by ZLP OUT sending quickly,
			// - or ZLP OUT received normally.
			udd_ctrl_endofrequest();
		} else {
			// Protocol error during SETUP request
			udd_ctrl_stall_data();
		}
		// Reinitializes control endpoint management
		udd_ctrl_init();
		return;
	}

	// Read data received during OUT phase
	nb_data = udd_udesc_get_buf0_ctn(0);
	if (udd_g_ctrlreq.payload_size < (udd_ctrl_payload_nb_trans + nb_data)) {
		// Payload buffer too small: truncate to the remaining space
		nb_data = udd_g_ctrlreq.payload_size - udd_ctrl_payload_nb_trans;
	}
	memcpy((uint8_t *) (udd_g_ctrlreq.payload + udd_ctrl_payload_nb_trans),
			udd_ctrl_buffer, nb_data);
	udd_ctrl_payload_nb_trans += nb_data;

	if ((USB_DEVICE_EP_CTRL_SIZE != nb_data)
			|| (udd_g_ctrlreq.req.wLength <=
			(udd_ctrl_prev_payload_nb_trans + udd_ctrl_payload_nb_trans))) {
		// End of reception because it is a short packet,
		// or all wLength bytes have been received.
		// Before sending the ZLP, call the intermediate callback —
		// in case the data received must generate a STALL.
		udd_g_ctrlreq.payload_size = udd_ctrl_payload_nb_trans;
		if (NULL != udd_g_ctrlreq.over_under_run) {
			if (!udd_g_ctrlreq.over_under_run()) {
				// Stall ZLP
				udd_ctrl_stall_data();
				// Ack reception of OUT to replace NAK by a STALL
				udd_ack_out_received(0);
				return;
			}
		}
		// Send IN ZLP to ACK setup request
		udd_ack_out_received(0);
		udd_ctrl_send_zlp_in();
		return;
	}

	if (udd_g_ctrlreq.payload_size == udd_ctrl_payload_nb_trans) {
		// Overrun: payload buffer full but more data expected,
		// then request a new payload buffer
		if (!udd_g_ctrlreq.over_under_run) {
			// No callback available to request a new payload buffer
			udd_ctrl_stall_data();
			// Ack reception of OUT to replace NAK by a STALL
			udd_ack_out_received(0);
			return;
		}
		if (!udd_g_ctrlreq.over_under_run()) {
			// No new payload buffer delivered
			udd_ctrl_stall_data();
			// Ack reception of OUT to replace NAK by a STALL
			udd_ack_out_received(0);
			return;
		}
		// New payload buffer available
		// Update number of total data received
		udd_ctrl_prev_payload_nb_trans += udd_ctrl_payload_nb_trans;
		// Reinit reception on payload buffer
		udd_ctrl_payload_nb_trans = 0;
	}

	// Free buffer of control endpoint to authorize next reception
	udd_ack_out_received(0);
	// To detect a protocol error, enable NAK interrupt on data IN phase
	udd_ack_nak_in(0);
	flags = cpu_irq_save();
	udd_enable_nak_in_interrupt(0);
	cpu_irq_restore(flags);
}
/**
 * \brief Terminates the on-going job on an endpoint and reports it.
 *
 * Computes the number of bytes actually transfered (IN: from the bank
 * descriptor; OUT: from the received count, via the cache buffer when the
 * remaining user space was smaller than one packet), releases or re-arms the
 * hardware bank as needed, disables the endpoint interrupt, clears the job's
 * \c busy flag and invokes the user callback.
 *
 * \param ep      Endpoint number of the job to finish (used 1-based to
 *                index \c udd_ep_job)
 * \param b_abort true to report UDD_EP_TRANSFER_ABORT to the callback,
 *                false to report UDD_EP_TRANSFER_OK
 */
static void udd_ep_finish_job(udd_ep_id_t ep, bool b_abort)
{
	udd_ep_job_t *ptr_job;
	uint16_t ep_size;
	irqflags_t flags;

	// Get job corresponding at endpoint
	ptr_job = &udd_ep_job[ep - 1];

	// Test if a pending transfer is running. If not, disable interrupt.
	if (!ptr_job->busy) {
		flags = cpu_irq_save();
		udd_disable_endpoint_interrupt(ep);
		cpu_irq_restore(flags);
		return;
	}

	if (Is_udd_endpoint_in(ep)) {
		// Update number of data transfered
		ptr_job->nb_trans = udd_udesc_get_buf0_size(ep);
		if (0 == ptr_job->nb_trans) {
			if (0 == udd_nb_busy_bank(ep)) {
				// All bytes are transfered, then take nb bytes requested
				ptr_job->nb_trans = udd_udesc_get_buf0_ctn(ep);
			}
		}
	} else {
		// Transfer complete on OUT
		ep_size = udd_format_endpoint_size(ep);
		if (ptr_job->b_use_out_cache_buffer) {
			// Copy data received from cache buffer to user buffer
			memcpy(&ptr_job->buf[ptr_job->nb_trans],
					udd_ep_out_cache_buffer[ep - 1],
					ptr_job->buf_size % ep_size);
			ptr_job->nb_trans += udd_udesc_get_buf0_ctn(ep);
		} else {
			ptr_job->nb_trans = udd_udesc_get_buf0_ctn(ep);
			// If all previous data requested are received
			// and user buffer not full
			if ((ptr_job->nb_trans == udd_udesc_get_buf0_size(ep))
					&& (ptr_job->nb_trans != ptr_job->buf_size)) {
				// Use the cache buffer to receive the last data,
				// which can be larger than the user buffer remaining
				ptr_job->b_use_out_cache_buffer = true;
				udd_udesc_rst_buf0_ctn(ep);
				udd_udesc_set_buf0_addr(ep,
						udd_ep_out_cache_buffer[ep - 1]);
				udd_udesc_set_buf0_size(ep, ep_size);
				// Free buffer to accept another data reception
				udd_ack_out_received(ep);
				udd_ack_fifocon(ep);
				return;
			}
		}
		// Free buffer but do not accept another data reception
		udd_ack_out_received(ep);
		udd_enable_busy_bank0(ep);
		udd_ack_fifocon(ep);
	}

	// Call callback to signal end of transfer
	flags = cpu_irq_save();
	udd_disable_endpoint_interrupt(ep);
	cpu_irq_restore(flags);
	ptr_job->busy = false;
	if (NULL == ptr_job->call_trans) {
		return; // No callback linked to job
	}
	// Fix: pass the endpoint id as the third argument, consistent with the
	// call_trans invocation in udd_ep_trans_done() and the udd_callback_trans_t
	// signature (status, nb_transfered, ep); the original omitted it here.
	ptr_job->call_trans((b_abort) ? UDD_EP_TRANSFER_ABORT : UDD_EP_TRANSFER_OK,
			ptr_job->nb_trans, ep);
}