/*
 * dwc2_ep0_complete_status - finish the STATUS stage of an EP0 control
 * transfer: latch USB2 test mode if one was armed, re-arm EP0 for the next
 * SETUP, give the completed request back, and return to the SETUP phase.
 */
static void dwc2_ep0_complete_status(struct dwc2 *dwc)
{
	struct dwc2_ep *ep0 = dwc2_ep0_get_ep0(dwc);
	struct dwc2_request *curr_req;
	dctl_data_t dctl;

	/* enter test mode if needed (exit by reset) */
	if (dwc->test_mode) {
		/*
		 * The transition to test mode must be complete no later than 3 ms
		 * after the completion of the status stage of the request.
		 */
		dctl.d32 = dwc_readl(&dwc->dev_if.dev_global_regs->dctl);
		dctl.b.tstctl = dwc->test_mode_nr;
		dwc_writel(dctl.d32, &dwc->dev_if.dev_global_regs->dctl);
	}

	/*
	 * prepare to receive SETUP again to ensure we are prepared!!!
	 * (done before giveback so the hardware is armed even if the
	 * gadget's completion callback queues a new request)
	 */
	dwc2_ep0_out_start(dwc);

	/* complete the request that drove this control transfer, if any */
	curr_req = next_request(&ep0->request_list);
	if (curr_req) {
		dwc2_gadget_giveback(ep0, curr_req, 0);
	}

	if (dwc->test_mode)
		dev_info(dwc->dev, "entering Test Mode(%d)\n", dwc->test_mode_nr);

	dwc->ep0state = EP0_SETUP_PHASE;
}
static void dwc3_ep0_complete_req(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_request *r; struct dwc3_ep *dep; dep = dwc->eps[0]; if (!list_empty(&dep->request_list)) { r = next_request(&dep->request_list); dwc3_gadget_giveback(dep, r, 0); } if (dwc->test_mode) { int ret; ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr); if (ret < 0) { dev_dbg(dwc->dev, "Invalid Test #%d\n", dwc->test_mode_nr); dwc3_ep0_stall_and_restart(dwc); } } dwc->ep0state = EP0_SETUP_PHASE; dwc3_ep0_out_start(dwc); }
static void dwc3_ep0_complete_data(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_request *r = NULL; struct usb_request *ur; struct dwc3_trb *trb; struct dwc3_ep *ep0; u32 transferred; u32 status; u32 length; u8 epnum; epnum = event->endpoint_number; ep0 = dwc->eps[0]; dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS; r = next_request(&ep0->request_list); ur = &r->request; trb = (struct dwc3_trb*)dwc->ep0_trb; status = trb->trbsts; if (status == DWC3_TRBSTS_SETUP_PENDING) { dev_dbg(dwc->dev, "Setup Pending received\n"); if (r) dwc3_gadget_giveback(ep0, r, -ECONNRESET); return; } length = trb->length; if (dwc->ep0_bounced) { unsigned transfer_size = ur->length; unsigned maxp = ep0->endpoint.maxpacket; transfer_size += (maxp - (transfer_size % maxp)); transferred = min_t(u32, ur->length, transfer_size - length); memcpy(ur->buf, dwc->ep0_bounce, transferred); } else { transferred = ur->length - length; } ur->actual += transferred; if ((epnum & 1) && ur->actual < ur->length) { /* for some reason we did not get everything out */ dwc3_ep0_stall_and_restart(dwc); } else { /* * handle the case where we have to send a zero packet. This * seems to be case when req.length > maxpacket. Could it be? */ if (r) dwc3_gadget_giveback(ep0, r, 0); } }
/*
 * dwc2_ep0_stall_and_restart - protocol-stall EP0 and reset the control
 * state machine back to the SETUP phase, flushing any queued request with
 * -ECONNRESET and cancelling delayed-status bookkeeping.
 */
static void dwc2_ep0_stall_and_restart(struct dwc2 *dwc)
{
	struct dwc2_ep *dep;

	dev_info(dwc->dev, "%s\n", __func__);

	/* reinitialize the IN side of EP0 */
	dep = dwc2_ep0_get_in_ep(dwc);
	dep->flags = DWC2_EP_ENABLED;

	/* stall is always issued on the OUT side of EP0 */
	dep = dwc2_ep0_get_out_ep(dwc);
	__dwc2_gadget_ep_set_halt(dep, 1);
	dep->flags = DWC2_EP_ENABLED;

	/* abandon any delayed-status handling and stop its watchdog */
	dwc->delayed_status = false;
	dwc->delayed_status_sent = true;
	del_timer(&dwc->delayed_status_watchdog);

	/* flush the queued request, if any, with a reset status */
	if (!list_empty(&dep->request_list)) {
		struct dwc2_request *req;

		req = next_request(&dep->request_list);
		dwc2_gadget_giveback(dep, req, -ECONNRESET);
	}

	/* re-arm EP0 for the next SETUP packet */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc2_ep0_out_start(dwc);
}
static void sunxi_ep0_complete_req(struct sunxi_otgc *otgc, const struct sunxi_otgc_event_depevt *event) { struct sunxi_otgc_request *r; struct sunxi_otgc_ep *dep; dep = otgc->eps[0]; if (!list_empty(&dep->request_list)) { r = next_request(&dep->request_list); sunxi_gadget_giveback(dep, r, 0); } if (otgc->test_mode) { int ret; ret = sunxi_gadget_set_test_mode(otgc->regs, otgc->test_mode_nr); if (ret < 0) { dev_dbg(otgc->dev, "Invalid Test #%d\n", otgc->test_mode_nr); sunxi_ep0_stall_and_restart(otgc); } } otgc->ep0state = EP0_SETUP_PHASE; sunxi_ep0_out_start(otgc); }
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc) { struct dwc3_ep *dep; /* reinitialize physical ep1 */ dep = dwc->eps[1]; dep->flags = DWC3_EP_ENABLED; /* stall is always issued on EP0 */ dep = dwc->eps[0]; __dwc3_gadget_ep_set_halt(dep, 1); dep->flags = DWC3_EP_ENABLED; dwc->delayed_status = false; if (!list_empty(&dep->request_list)) { struct dwc3_request *req; req = next_request(&dep->request_list); dwc3_gadget_giveback(dep, req, -ECONNRESET); } dwc->ep0state = EP0_SETUP_PHASE; dwc3_ep0_out_start(dwc); }
static void dwc3_ep0_complete_data(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_request *r = NULL; struct usb_request *ur; struct dwc3_trb *trb; struct dwc3_ep *ep0; unsigned maxp; unsigned remaining_ur_length; void *buf; u32 transferred = 0; u32 status; u32 length; u8 epnum; epnum = event->endpoint_number; ep0 = dwc->eps[0]; dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS; trb = dwc->ep0_trb; trace_dwc3_complete_trb(ep0, trb); r = next_request(&ep0->pending_list); if (!r) return; status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (status == DWC3_TRBSTS_SETUP_PENDING) { dwc->setup_packet_pending = true; if (r) dwc3_gadget_giveback(ep0, r, -ECONNRESET); return; } ur = &r->request; buf = ur->buf; remaining_ur_length = ur->length; length = trb->size & DWC3_TRB_SIZE_MASK; maxp = ep0->endpoint.maxpacket; transferred = ur->length - length; ur->actual += transferred; if ((IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) && ur->length && ur->zero) || dwc->ep0_bounced) { trb++; trb->ctrl &= ~DWC3_TRB_CTRL_HWO; trace_dwc3_complete_trb(ep0, trb); ep0->trb_enqueue = 0; dwc->ep0_bounced = false; } if ((epnum & 1) && ur->actual < ur->length) dwc3_ep0_stall_and_restart(dwc); else dwc3_gadget_giveback(ep0, r, 0); }
static void dwc3_ep0_do_control_data(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_ep *dep; struct dwc3_request *req; int ret; dep = dwc->eps[0]; dwc->ep0state = EP0_DATA_PHASE; if (dwc->ep0_status_pending) { dwc3_ep0_send_status_response(dwc); return; } if (list_empty(&dep->request_list)) { dev_vdbg(dwc->dev, "pending request for EP0 Data phase\n"); dep->flags |= DWC3_EP_PENDING_REQUEST; if (event->endpoint_number) dep->flags |= DWC3_EP0_DIR_IN; return; } req = next_request(&dep->request_list); req->direction = !!event->endpoint_number; dwc->ep0state = EP0_DATA_PHASE; if (req->request.length == 0) { ret = dwc3_ep0_start_trans(dwc, event->endpoint_number, dwc->ctrl_req_addr, 0, DWC3_TRBCTL_CONTROL_DATA); } else if ((req->request.length % dep->endpoint.maxpacket) && (event->endpoint_number == 0)) { dwc3_map_buffer_to_dma(req); WARN_ON(req->request.length > dep->endpoint.maxpacket); dwc->ep0_bounced = true; /* * REVISIT in case request length is bigger than EP0 * wMaxPacketSize, we will need two chained TRBs to handle * the transfer. */ ret = dwc3_ep0_start_trans(dwc, event->endpoint_number, dwc->ep0_bounce_addr, dep->endpoint.maxpacket, DWC3_TRBCTL_CONTROL_DATA); } else { dwc3_map_buffer_to_dma(req); ret = dwc3_ep0_start_trans(dwc, event->endpoint_number, req->request.dma, req->request.length, DWC3_TRBCTL_CONTROL_DATA); } WARN_ON(ret < 0); }
/*
 * When receiving RXQ done interrupt, qmu_interrupt calls this function.
 * 1. Traverse GPD/BD data structures to count actual transferred length.
 * 2. Set the done flag to notify rxstate_qmu() to report status to upper
 *    gadget driver.
 * Ported from proc_qmu_rx() from the test driver.
 * Caller: qmu_interrupt after getting QMU done interrupt and TX is raised.
 *
 * NOTE(review): this fragment is truncated in this chunk; the function body
 * continues beyond the last line shown here.
 */
void qmu_done_rx(struct musb *musb, u8 ep_num, unsigned long flags)
{
	TGPD *gpd = Rx_gpd_last[ep_num];
	TGPD *gpd_current = (TGPD *) (os_readl(USB_QMU_RQCPR(ep_num)));
	struct musb_ep *musb_ep = &musb->endpoints[ep_num].ep_out;
	struct usb_request *request = NULL;
	struct musb_request *req;

	/* trying to give_back the request to gadget driver. */
	req = next_request(musb_ep);
	if (!req) {
		qmu_printk(K_ERR, "[RXD]" "%s Cannot get next request of %d, "
			   "but QMU has done.\n", __func__, ep_num);
		return;
	} else {
		request = &req->request;
	}

	/*Transfer PHY addr got from QMU register to VIR addr */
	gpd_current = phys_to_virt((unsigned long)gpd_current);

	qmu_printk(K_DEBUG, "[RXD]" "%s EP%d, Last=%p, Current=%p, End=%p\n",
		   __func__, ep_num, gpd, gpd_current, Rx_gpd_end[ep_num]);

	/*
	 * gpd_current should at least point to the next GPD to the previous
	 * last one; if it still equals the last-seen GPD, dump queue state
	 * and bail out.
	 */
	if (gpd == gpd_current) {
		qmu_printk(K_ERR, "[RXD][ERROR]" "%s gpd(%p) == gpd_current(%p)\n",
			   __func__, gpd, gpd_current);
		qmu_printk(K_ERR, "[RXD][ERROR]"
			   "EP%d RQCSR=%x, RQSAR=%x, RQCPR=%x, RQLDPR=%x\n",
			   ep_num, os_readl(USB_QMU_RQCSR(ep_num)),
			   os_readl(USB_QMU_RQSAR(ep_num)),
			   os_readl(USB_QMU_RQCPR(ep_num)),
			   os_readl(USB_QMU_RQLDPR(ep_num)));
		qmu_printk(K_ERR, "[RXD][ERROR]"
			   "QCR0=%x, QCR1=%x, QCR2=%x, QCR3=%x, "
			   "QGCSR=%x\n", os_readl(U3D_QCR0), os_readl(U3D_QCR1),
			   os_readl(U3D_QCR2), os_readl(U3D_QCR3),
			   os_readl(U3D_QGCSR));
		qmu_printk(K_INFO, "[RXD][ERROR]"
			   "HWO=%d, Next_GPD=%x ,DataBufLen=%d, "
			   "DataBuf=%x, RecvLen=%d, Endpoint=%d\n",
			   (DEV_UINT32) TGPD_GET_FLAG(gpd),
			   (DEV_UINT32) TGPD_GET_NEXT(gpd),
			   (DEV_UINT32) TGPD_GET_DataBUF_LEN(gpd),
			   (DEV_UINT32) TGPD_GET_DATA(gpd),
			   (DEV_UINT32) TGPD_GET_BUF_LEN(gpd),
			   (DEV_UINT32) TGPD_GET_EPaddr(gpd));
		return;
	}

	/*
	 * Drop the musb lock across the cache maintenance call; callers hold
	 * it with IRQs saved in `flags`.
	 */
	spin_unlock_irqrestore(&musb->lock, flags);
	/* invalidate GPD data in CPU */
	dma_sync_single_for_cpu(musb->controller, virt_to_phys(gpd), sizeof(TGPD), DMA_FROM_DEVICE);
	spin_lock_irqsave(&musb->lock, flags);

	/*
	 * NOTE(review): this NULL check comes after gpd was already passed to
	 * virt_to_phys() above and compared against gpd_current — verify it
	 * should not be performed earlier.
	 */
	if (!gpd || !gpd_current) {
		qmu_printk(K_ERR, "[RXD][ERROR]"
			   "%s EP%d, gpd=%p, gpd_current=%p, ishwo=%d, \
rx_gpd_last=%p, RQCPR=0x%x\n",
			   __func__, ep_num, gpd, gpd_current,
			   ((gpd == NULL) ? 999 : TGPD_IS_FLAGS_HWO(gpd)),
			   Rx_gpd_last[ep_num], os_readl(USB_QMU_RQCPR(ep_num)));
		return;
	}
/*
 * When receiving RXQ done interrupt, qmu_interrupt calls this function.
 * 1. Traverse GPD/BD data structures to count actual transferred length.
 * 2. Set the done flag to notify rxstate_qmu() to report status to upper
 *    gadget driver.
 * Ported from proc_qmu_rx() from the test driver.
 * Caller: qmu_interrupt after getting QMU done interrupt and TX is raised.
 *
 * NOTE(review): this fragment is truncated in this chunk; the function body
 * continues beyond the last line shown here.
 */
void qmu_done_rx(struct musb *musb, u8 ep_num, unsigned long flags)
{
	TGPD* gpd = Rx_gpd_last[ep_num];
	/* QMU GPD address --> CPU DMA address */
	TGPD* gpd_current = (TGPD*)(uintptr_t)(os_readl(USB_QMU_RQCPR(ep_num)));
	struct musb_ep *musb_ep = &musb->endpoints[ep_num].ep_out;
	struct usb_request *request = NULL;
	struct musb_request *req;

	/* trying to give_back the request to gadget driver. */
	req = next_request(musb_ep);
	if (!req) {
		qmu_printk(K_ERR, "[RXD]""%s Cannot get next request of %d, "
			"but QMU has done.\n", __func__, ep_num);
		return;
	} else {
		request = &req->request;
	}

	/*Transfer PHY addr got from QMU register to VIR addr*/
	gpd_current = gpd_phys_to_virt(gpd_current, USB_RX, ep_num);

	qmu_printk(K_DEBUG, "[RXD]""%s EP%d, Last=%p, Current=%p, End=%p\n",
		__func__, ep_num, gpd, gpd_current, Rx_gpd_end[ep_num]);

	/*
	 * gpd_current should at least point to the next GPD to the previous
	 * last one; if it still equals the last-seen GPD, dump queue state
	 * and bail out.
	 */
	if (gpd == gpd_current) {
		qmu_printk(K_ERR, "[RXD][ERROR]""%s gpd(%p) == gpd_current(%p)\n",
			__func__, gpd,
			gpd_current);
		qmu_printk(K_ERR, "[RXD][ERROR]""EP%d RQCSR=%x, RQSAR=%x, RQCPR=%x, RQLDPR=%x\n",
			ep_num, os_readl(USB_QMU_RQCSR(ep_num)),
			os_readl(USB_QMU_RQSAR(ep_num)),
			os_readl(USB_QMU_RQCPR(ep_num)),
			os_readl(USB_QMU_RQLDPR(ep_num)));
		qmu_printk(K_ERR, "[RXD][ERROR]""QCR0=%x, QCR1=%x, QCR2=%x, QCR3=%x, "
			"QGCSR=%x\n", os_readl(U3D_QCR0), os_readl(U3D_QCR1),
			os_readl(U3D_QCR2),
			os_readl(U3D_QCR3), os_readl(U3D_QGCSR));
		qmu_printk(K_INFO,"[RXD][ERROR]""HWO=%d, Next_GPD=%lx ,DataBufLen=%d, "
			"DataBuf=%lx, RecvLen=%d, Endpoint=%d\n",
			(u32)TGPD_GET_FLAG(gpd), (uintptr_t)TGPD_GET_NEXT(gpd),
			(u32)TGPD_GET_DataBUF_LEN(gpd),
			(uintptr_t)TGPD_GET_DATA(gpd), (u32)TGPD_GET_BUF_LEN(gpd),
			(u32)TGPD_GET_EPaddr(gpd));
		return;
	}

	/*
	 * NOTE(review): this NULL check comes after gpd was already compared
	 * against gpd_current — verify it should not be performed earlier.
	 */
	if(!gpd || !gpd_current) {
		qmu_printk(K_ERR, "[RXD][ERROR]""%s EP%d, gpd=%p, gpd_current=%p, ishwo=%d, \
rx_gpd_last=%p, RQCPR=0x%x\n",
			__func__, ep_num, gpd, gpd_current,
			((gpd==NULL) ? 999 : TGPD_IS_FLAGS_HWO(gpd)),
			Rx_gpd_last[ep_num], os_readl(USB_QMU_RQCPR(ep_num)));
		return;
	}
static void dwc2_ep0_out_complete_data(struct dwc2 *dwc) { struct dwc2_ep *ep0 = dwc2_ep0_get_ep0(dwc); struct dwc2_dev_if *dev_if = &dwc->dev_if; struct dwc2_request *curr_req; deptsiz0_data_t deptsiz; int trans_count = 0; curr_req = next_request(&ep0->request_list); if (curr_req == NULL) { return; } if (dwc->dma_enable) { if (dwc->dma_desc_enable) { /* TODO: Scatter/Gather DMA Mode here! */ } else { deptsiz.d32 = dwc_readl(&dev_if->out_ep_regs[0]->doeptsiz); /* The Programming Guide said xfercompl raised when xfersize and pktcnt gets zero */ WARN(deptsiz.b.xfersize, "%s: xfersize not zero when transfer complete\n", __func__); trans_count = curr_req->xfersize - deptsiz.b.xfersize; } curr_req->trans_count_left -= trans_count; if (curr_req->trans_count_left < 0) { curr_req->trans_count_left = 0; } //curr_req->next_dma_addr += trans_count; curr_req->request.actual += trans_count; /* copy from shadow buffer to real buffer */ if (curr_req->request.length != 0) { int offset = curr_req->request.actual; u32 buf_addr = (u32)curr_req->request.buf; memcpy( (void *)(buf_addr + offset), (void *)dwc->ep0out_shadow_uncached, trans_count); } /* if xfersize is not zero, we receive an short packet, the transfer is complete */ if (!deptsiz.b.xfersize && (curr_req->trans_count_left || need_send_zlp(curr_req))) { dwc2_ep0_start_transfer(dwc, curr_req); } else { dwc2_ep0_do_status_phase(dwc); } } else { /* TODO: PIOMode */ } }
// pf_adaptor: facade object that wires the transmitter (tx_p) and the cyclic
// request controller (controller_p) together with queued/blocking Qt signal
// connections, then moves each worker onto its own QThread and starts them.
pf_adaptor::pf_adaptor(QObject *parent) :
    QObject(parent),
    tr_tx_p(new QThread()),
    tr_cc_p(new QThread()),
    tx_p(new pf_transmitter()),
    controller_p(new pf_controller())
{
    // Serial-port open/close are forwarded to the transmitter; blocking so
    // the caller only proceeds once the port operation has completed.
    connect(this, SIGNAL(open_serial(QString, QString, qint32)),
            tx_p, SLOT(open_serial(QString, QString, qint32)), Qt::BlockingQueuedConnection);
    connect(this, SIGNAL(close_serial(void)),
            tx_p, SLOT(close_port(void)), Qt::BlockingQueuedConnection);

    // connect(this, SIGNAL(request(QByteArray, bool)),
    //         tx_p, SLOT(transmitt(QByteArray, bool)), Qt::BlockingQueuedConnection);

    // Single requests go through the controller, which serializes them onto
    // the transmitter.
    connect(this, SIGNAL(request(QByteArray, bool)),
            controller_p, SLOT(single_request(QByteArray, bool)), Qt::BlockingQueuedConnection);
    connect(controller_p, SIGNAL(transmitt(QByteArray, bool)),
            tx_p, SLOT(transmitt(QByteArray, bool)), Qt::BlockingQueuedConnection);

    // connect(tx_p, SIGNAL(reply(QByteArray /*reply*/, QByteArray /*request*/, qint32 /*time*/ )),
    //         this, SIGNAL(reply(QByteArray /*reply*/, QByteArray /*request*/, qint32 /*time*/ )), Qt::QueuedConnection);

    // Transmitter results are re-emitted by this adaptor; errors are queued.
    connect(tx_p, SIGNAL(reply(pf_reply)),
            this, SIGNAL(reply(pf_reply)));
    connect(tx_p, SIGNAL(error(pf_error)),
            this, SIGNAL(error(pf_error)), Qt::QueuedConnection);

    // When the transmitter is idle it pulls the next queued request.
    connect(tx_p, SIGNAL(ready()),
            controller_p, SLOT(next_request()), Qt::QueuedConnection);

    // Cyclic-request management; stop/reset/start block until handled.
    connect(this, SIGNAL(add_cyclic(QByteArray, quint32, quint32, bool)),
            controller_p, SLOT(add(QByteArray, quint32, quint32, bool)), Qt::QueuedConnection);
    connect(this, SIGNAL(stop_cyclic()),
            controller_p, SLOT(stop()), Qt::BlockingQueuedConnection);
    connect(this, SIGNAL(reset_cyclic()),
            controller_p, SLOT(reset()), Qt::BlockingQueuedConnection);
    connect(this, SIGNAL(start_cyclic()),
            controller_p, SLOT(start()), Qt::BlockingQueuedConnection);

    //TODO Can work in existing thread
    tx_p->moveToThread(tr_tx_p);
    controller_p->moveToThread(tr_cc_p);

    tr_tx_p->start();
    tr_cc_p->start();

    //tr_cc_p->setPriority(QThread::HighPriority);
    // Transmitter thread is latency-sensitive (serial timing).
    tr_tx_p->setPriority(QThread::TimeCriticalPriority);
}
/*
 * dwc3_ep0_complete_data - DATA stage completed on EP0: account the bytes
 * actually transferred (copying bounced OUT data back to the gadget buffer)
 * and either give the request back or stall on a short IN transfer.
 */
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r = NULL;
	struct usb_request *ur;
	struct dwc3_trb trb;
	struct dwc3_ep *dep;
	u32 transferred;
	u8 epnum;

	epnum = event->endpoint_number;
	dep = dwc->eps[epnum];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;

	if (!dwc->ep0_status_pending) {
		/*
		 * NOTE(review): r is dereferenced via &r->request without a
		 * NULL check, yet the tail of this function guards on r —
		 * confirm next_request() cannot return NULL here.
		 */
		r = next_request(&dwc->eps[0]->request_list);
		ur = &r->request;
	} else {
		/* internally generated status response (e.g. GET_STATUS) */
		ur = &dwc->ep0_usb_req;
		dwc->ep0_status_pending = 0;
	}

	/* snapshot the hardware TRB into native-endian form */
	dwc3_trb_to_nat(dwc->ep0_trb, &trb);

	if (dwc->ep0_bounced) {
		struct dwc3_ep *ep0 = dwc->eps[0];

		/* short OUT landed in the bounce buffer; clamp and copy back */
		transferred = min_t(u32, ur->length,
				ep0->endpoint.maxpacket - trb.length);
		memcpy(ur->buf, dwc->ep0_bounce, transferred);
		dwc->ep0_bounced = false;
	} else {
		transferred = ur->length - trb.length;
	}

	ur->actual += transferred;

	if ((epnum & 1) && ur->actual < ur->length) {
		/* for some reason we did not get everything out */
		dwc3_ep0_stall_and_restart(dwc);
	} else {
		/*
		 * handle the case where we have to send a zero packet. This
		 * seems to be case when req.length > maxpacket. Could it be?
		 */
		if (r)
			dwc3_gadget_giveback(dep, r, 0);
	}
}
static void sunxi_ep0_complete_data(struct sunxi_otgc *otgc, const struct sunxi_otgc_event_depevt *event) { struct sunxi_otgc_request *r = NULL; struct usb_request *ur; struct sunxi_otgc_trb *trb; struct sunxi_otgc_ep *ep0; u32 transferred; u32 length; u8 epnum; epnum = event->endpoint_number; ep0 = otgc->eps[0]; otgc->ep0_next_event = SUNXI_EP0_NRDY_STATUS; r = next_request(&ep0->request_list); ur = &r->request; trb = otgc->ep0_trb; length = trb->size & SUNXI_TRB_SIZE_MASK; if (otgc->ep0_bounced) { unsigned transfer_size = ur->length; unsigned maxp = ep0->endpoint.maxpacket; transfer_size += (maxp - (transfer_size % maxp)); transferred = min_t(u32, ur->length, transfer_size - length); memcpy(ur->buf, otgc->ep0_bounce, transferred); } else { transferred = ur->length - length; } ur->actual += transferred; if ((epnum & 1) && ur->actual < ur->length) { /* for some reason we did not get everything out */ sunxi_ep0_stall_and_restart(otgc); } else { /* * handle the case where we have to send a zero packet. This * seems to be case when req.length > maxpacket. Could it be? */ if (r) sunxi_gadget_giveback(ep0, r, 0); } }
static void dwc3_ep0_complete_data(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_request *r = NULL; struct usb_request *ur; struct dwc3_trb trb; struct dwc3_ep *dep; u32 transfered; u8 epnum; epnum = event->endpoint_number; dep = dwc->eps[epnum]; if (!dwc->ep0_status_pending) { r = next_request(&dep->request_list); ur = &r->request; } else { ur = &dwc->ep0_usb_req; dwc->ep0_status_pending = 0; } dwc3_trb_to_nat(dwc->ep0_trb, &trb); transfered = ur->length - trb.length; ur->actual += transfered; if ((epnum & 1) && ur->actual < ur->length) { /* for some reason we did not get everything out */ dwc3_ep0_stall_and_restart(dwc); dwc3_gadget_giveback(dep, r, -ECONNRESET); } else { /* * handle the case where we have to send a zero packet. This * seems to be case when req.length > maxpacket. Could it be? */ /* The transfer is complete, wait for HOST */ if (epnum & 1) dwc->ep0state = EP0_IN_WAIT_NRDY; else dwc->ep0state = EP0_OUT_WAIT_NRDY; if (r) dwc3_gadget_giveback(dep, r, 0); } }
/*
 * manage_single_request - serve one HTTP exchange on an accepted socket:
 * read the request, build and send the response, and record the elapsed
 * time in the global requests_time tracker.
 *
 * BUGFIX: malloc results are now checked before use, and the redundant
 * C-style casts of malloc's return value are removed.
 */
static void manage_single_request(int peer_sfd)
{
	s_start(&requests_time);

	http_request_t *request = malloc(sizeof *request);
	http_response_t *response = malloc(sizeof *response);

	if (!request || !response) {
		/* out of memory: skip this exchange, still stop the timer */
		free(request);
		free(response);
		s_stop(&requests_time);
		get_time_difference(&requests_time);
		return;
	}

	/*
	 * NOTE(review): assumes resource_path is large enough for path_root;
	 * confirm the buffer size or switch to a bounded copy.
	 */
	strcpy(response->resource_path, path_root);

	next_request(peer_sfd, request);
	build_response(request, response);
	send_response(peer_sfd, response);

	clear_responses(response);
	free(request);
	free(response);

	s_stop(&requests_time);
	get_time_difference(&requests_time);
}
/*
 * dwc3_ep0_complete_req - status stage done: complete the queued request
 * and re-arm EP0 for the next SETUP packet.
 */
static void dwc3_ep0_complete_req(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *ep0 = dwc->eps[0];

	/* give the request that drove this transfer back, if one is queued */
	if (!list_empty(&ep0->request_list))
		dwc3_gadget_giveback(ep0, next_request(&ep0->request_list), 0);

	/* back to the SETUP phase, hardware re-armed for the next packet */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}
static void dwc3_ep0_complete_data(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_request *r = NULL; struct usb_request *ur; struct dwc3_trb *trb; struct dwc3_ep *ep0; u32 transferred; u32 length; u8 epnum; epnum = event->endpoint_number; ep0 = dwc->eps[0]; dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS; r = next_request(&ep0->request_list); ur = &r->request; trb = dwc->ep0_trb; length = trb->size & DWC3_TRB_SIZE_MASK; if (dwc->ep0_bounced) { transferred = min_t(u32, ur->length, ep0->endpoint.maxpacket - length); memcpy(ur->buf, dwc->ep0_bounce, transferred); dwc->ep0_bounced = false; } else { transferred = ur->length - length; ur->actual += transferred; } if ((epnum & 1) && ur->actual < ur->length) { /* for some reason we did not get everything out */ dwc3_ep0_stall_and_restart(dwc); } else { /* * handle the case where we have to send a zero packet. This * seems to be case when req.length > maxpacket. Could it be? */ if (r) dwc3_gadget_giveback(ep0, r, 0); } }
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc) { struct dwc3_ep *dep = dwc->eps[0]; /* stall is always issued on EP0 */ __dwc3_gadget_ep_set_halt(dwc->eps[0], 1); dwc->eps[0]->flags = DWC3_EP_ENABLED; if (!list_empty(&dep->request_list)) { struct dwc3_request *req; req = next_request(&dep->request_list); dwc3_gadget_giveback(dep, req, -ECONNRESET); } dwc->ep0state = EP0_SETUP_PHASE; dwc3_ep0_out_start(dwc); }
/*
 * dwc3_ep0_complete_req - complete the request queued on the endpoint that
 * raised this event, then return EP0 to idle and re-arm for the next SETUP.
 */
static void dwc3_ep0_complete_req(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	u8 epnum = event->endpoint_number;
	struct dwc3_ep *dep = dwc->eps[epnum];

	/* give the completed request back, if one is queued */
	if (!list_empty(&dep->request_list))
		dwc3_gadget_giveback(dep, next_request(&dep->request_list), 0);

	/* back to idle, hardware re-armed for the next SETUP packet */
	dwc->ep0state = EP0_IDLE;
	dwc3_ep0_out_start(dwc);
}
static void sunxi_ep0_stall_and_restart(struct sunxi_otgc *otgc) { struct sunxi_otgc_ep *dep = otgc->eps[0]; /* stall is always issued on EP0 */ __sunxi_gadget_ep_set_halt(dep, 1); dep->flags = SUNXI_EP_ENABLED; otgc->delayed_status = false; if (!list_empty(&dep->request_list)) { struct sunxi_otgc_request *req; req = next_request(&dep->request_list); sunxi_gadget_giveback(dep, req, -ECONNRESET); } otgc->ep0state = EP0_SETUP_PHASE; sunxi_ep0_out_start(otgc); }
/*
 * dwc2_ep0_in_complete_data - EP0 IN DATA stage finished (DMA mode): compute
 * how many bytes the core actually sent, then either continue the transfer
 * (more data or a ZLP pending) or move on to the STATUS phase.
 */
static void dwc2_ep0_in_complete_data(struct dwc2 *dwc)
{
	struct dwc2_ep *ep0 = dwc2_ep0_get_ep0(dwc);
	struct dwc2_dev_if *dev_if = &dwc->dev_if;
	struct dwc2_request *curr_req;
	deptsiz0_data_t deptsiz;
	int trans_count = 0;

	curr_req = next_request(&ep0->request_list);
	if (curr_req == NULL) {
		return;
	}

	if (dwc->dma_enable) {
		if (dwc->dma_desc_enable) {
			/* TODO: Scatter/Gather DMA Mode here! */
			/*
			 * NOTE(review): deptsiz stays uninitialized on this
			 * path; only trans_count (0) is used below.
			 */
		} else {
			deptsiz.d32 = dwc_readl(&dev_if->in_ep_regs[0]->dieptsiz);

			/* The Programming Guide said xfercompl raised when
			   xfersize and pktcnt gets zero */
			WARN(deptsiz.b.xfersize,
				"%s: xfersize not zero when transfer complete\n",
				__func__);

			/* bytes the core consumed out of this programming */
			trans_count = curr_req->xfersize - deptsiz.b.xfersize;
		}

		curr_req->trans_count_left -= trans_count;
		if (curr_req->trans_count_left < 0)
			curr_req->trans_count_left = 0;
		curr_req->next_dma_addr += trans_count;

		if (curr_req->trans_count_left || need_send_zlp(curr_req)) {
			/* more payload (or a ZLP) still owed to the host */
			DWC2_EP0_DEBUG_MSG("req 0x%p continue transfer, is_in = %d\n",
					curr_req, curr_req->dwc2_ep->is_in);
			dwc2_ep0_start_transfer(dwc, curr_req);
		} else {
			DWC2_EP0_DEBUG_MSG("req 0x%p done, do %s status phase\n",
					curr_req, dwc->ep0_expect_in ? "OUT" : "IN");
			dwc2_ep0_do_status_phase(dwc);
		}
	} else {
		/* TODO: PIO Mode */
	}
}
static void dwc3_ep0_complete_status(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_request *r; struct dwc3_ep *dep; struct dwc3_trb *trb; u32 status; dep = dwc->eps[0]; trb = dwc->ep0_trb; trace_dwc3_complete_trb(dep, trb); if (!list_empty(&dep->request_list)) { r = next_request(&dep->request_list); dwc3_gadget_giveback(dep, r, 0); } if (dwc->test_mode) { int ret; ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr); if (ret < 0) { dwc3_trace(trace_dwc3_ep0, "Invalid Test #%d", dwc->test_mode_nr); dwc3_ep0_stall_and_restart(dwc); return; } } status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (status == DWC3_TRBSTS_SETUP_PENDING) { dwc->setup_packet_pending = true; dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); } dwc->ep0state = EP0_SETUP_PHASE; dwc3_ep0_out_start(dwc); }
static void dwc3_ep0_complete_status(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_request *r; struct dwc3_ep *dep; struct dwc3_trb *trb; u32 status; dep = dwc->eps[0]; trb = dwc->ep0_trb; if (!list_empty(&dep->request_list)) { r = next_request(&dep->request_list); dwc3_gadget_giveback(dep, r, 0); } if (dwc->test_mode) { int ret; ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr); if (ret < 0) { dev_dbg(dwc->dev, "Invalid Test #%d\n", dwc->test_mode_nr); dbg_event(0x00, "INVALTEST", ret); dwc3_ep0_stall_and_restart(dwc); return; } } status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (status == DWC3_TRBSTS_SETUP_PENDING) dev_dbg(dwc->dev, "Setup Pending received\n"); dbg_print(dep->number, "DONE", status, "STATUS"); dwc->ep0state = EP0_SETUP_PHASE; dwc3_ep0_out_start(dwc); }
/*
 * 1. Find the last gpd HW has executed and update Tx_gpd_last[]
 * 2. Set the flag for txstate to know that TX has been completed
 * Ported from proc_qmu_tx() from the test driver.
 * Caller: qmu_interrupt after getting QMU done interrupt and TX is raised.
 */
void qmu_done_tx(struct musb *musb, u8 ep_num, unsigned long flags)
{
	TGPD *gpd = Tx_gpd_last[ep_num];
	TGPD *gpd_current = (TGPD *) (os_readl(USB_QMU_TQCPR(ep_num)));
	struct musb_ep *musb_ep = &musb->endpoints[ep_num].ep_in;
	struct usb_request *request = NULL;
	struct musb_request *req;

	/* trying to give_back the request to gadget driver. */
	req = next_request(musb_ep);
	if (!req) {
		qmu_printk(K_INFO, "[TXD]" "%s Cannot get next request of %d, "
			   "but QMU has done.\n", __func__, ep_num);
		return;
	} else {
		request = &req->request;
	}

	/*Transfer PHY addr got from QMU register to VIR addr */
	gpd_current = phys_to_virt((unsigned long)gpd_current);

	/*
	       gpd or Last       gdp_current
	         |                  |
	         |->  GPD1 --> GPD2 --> GPD3 --> GPD4 --> GPD5 -|
	         |----------------------------------------------|
	 */

	qmu_printk(K_DEBUG, "[TXD]" "%s EP%d, Last=%p, Current=%p, End=%p\n",
		   __func__, ep_num, gpd, gpd_current, Tx_gpd_end[ep_num]);

	/*
	 * gpd_current should at least point to the next GPD to the previous
	 * last one; nothing completed if it still equals the last-seen GPD.
	 */
	if (gpd == gpd_current) {
		qmu_printk(K_ERR, "[TXD][ERROR]" "%s gpd(%p) == gpd_current(%p)\n",
			   __func__, gpd, gpd_current);
		return;
	}

	/* drop the musb lock (held by caller, IRQs saved in `flags`) across
	 * cache maintenance */
	spin_unlock_irqrestore(&musb->lock, flags);
	/* flush data from device to CPU */
	dma_sync_single_for_cpu(musb->controller, virt_to_phys(gpd), sizeof(TGPD), DMA_FROM_DEVICE);
	spin_lock_irqsave(&musb->lock, flags);

	/* the last-seen GPD must already be owned by software */
	if (TGPD_IS_FLAGS_HWO(gpd)) {
		qmu_printk(K_DEBUG, "[TXD]" "%s HWO=1, CPR=%x\n", __func__,
			   os_readl(USB_QMU_TQCPR(ep_num)));
		BUG_ON(1);
	}

	/* walk every software-owned GPD up to the hardware's current one,
	 * giving each completed request back in queue order */
	while (gpd != gpd_current && !TGPD_IS_FLAGS_HWO(gpd)) {
		qmu_printk(K_DEBUG, "[TXD]"
			   "gpd=%p ->HWO=%d, BPD=%d, Next_GPD=%x, DataBuffer=%x, "
			   "BufferLen=%d request=%p\n", gpd, (u32) TGPD_GET_FLAG(gpd),
			   (u32) TGPD_GET_FORMAT(gpd), (u32) TGPD_GET_NEXT(gpd),
			   (u32) TGPD_GET_DATA(gpd), (u32) TGPD_GET_BUF_LEN(gpd), req);

		if (!TGPD_GET_NEXT(gpd)) {
			qmu_printk(K_ERR, "[TXD][ERROR]" "Next GPD is null!!\n");
			/* BUG_ON(1); */
			break;
		}

		gpd = TGPD_GET_NEXT(gpd);

		spin_unlock_irqrestore(&musb->lock, flags);
		/*flush data from device to CPU */
		dma_sync_single_for_cpu(musb->controller, (dma_addr_t) gpd, sizeof(TGPD), DMA_FROM_DEVICE);
		spin_lock_irqsave(&musb->lock, flags);

		gpd = phys_to_virt((unsigned long)gpd);
		Tx_gpd_last[ep_num] = gpd;
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * NOTE(review): next_request() may return NULL when the queue
		 * drains, yet request = &req->request is computed regardless —
		 * verify the loop condition always terminates first.
		 */
		req = next_request(musb_ep);
		request = &req->request;
	}

	/* loop ended early while hardware still owns a GPD: dump queue state */
	if (gpd != gpd_current && TGPD_IS_FLAGS_HWO(gpd)) {
		qmu_printk(K_ERR, "[TXD][ERROR]" "EP%d TQCSR=%x, TQSAR=%x, TQCPR=%x\n",
			   ep_num, os_readl(USB_QMU_TQCSR(ep_num)),
			   os_readl(USB_QMU_TQSAR(ep_num)),
			   os_readl(USB_QMU_TQCPR(ep_num)));
		qmu_printk(K_ERR, "[TXD][ERROR]" "QCR0=%x, QCR1=%x, QCR2=%x, QCR3=%x, "
			   "QGCSR=%x\n", os_readl(U3D_QCR0), os_readl(U3D_QCR1),
			   os_readl(U3D_QCR2), os_readl(U3D_QCR3), os_readl(U3D_QGCSR));
		qmu_printk(K_ERR, "[TXD][ERROR]" "HWO=%d, BPD=%d, Next_GPD=%x, DataBuffer=%x, "
			   "BufferLen=%d, Endpoint=%d\n",
			   (DEV_UINT32) TGPD_GET_FLAG(gpd), (DEV_UINT32) TGPD_GET_FORMAT(gpd),
			   (DEV_UINT32) TGPD_GET_NEXT(gpd), (DEV_UINT32) TGPD_GET_DATA(gpd),
			   (DEV_UINT32) TGPD_GET_BUF_LEN(gpd), (DEV_UINT32) TGPD_GET_EPaddr(gpd));
	}

	qmu_printk(K_DEBUG, "[TXD]" "%s EP%d, Last=%p, End=%p, complete\n", __func__,
		   ep_num, Tx_gpd_last[ep_num], Tx_gpd_end[ep_num]);

	/* a zero-length request never occupies a GPD: kick the packet-ready
	 * bit by hand and give the request back directly */
	if (req != NULL) {
		if (request->length == 0) {
			qmu_printk(K_DEBUG, "[TXD]" "==Send ZLP== %p\n", req);
			while (!(USB_ReadCsr32(U3D_TX1CSR0, req->epnum) & TX_FIFOFULL)) {
				USB_WriteCsr32(U3D_TX1CSR0, req->epnum,
					       USB_ReadCsr32(U3D_TX1CSR0, req->epnum) | TX_TXPKTRDY);
				break;
			}
			qmu_printk(K_DEBUG, "[TXD]"
				   "Giveback ZLP of EP%d, actual:%d, length:%d %p\n",
				   req->epnum, request->actual, request->length, request);
			musb_g_giveback(musb_ep, request, 0);
		}
	}
}
int main(int argc, char **argv) { struct set *ss; struct remote *rx; struct katcl_parse *px; struct katcl_line *k; struct timeval delta, start, stop; fd_set fsr, fsw; char *app, *parm, *cmd, *copy, *ptr, *servers, *extra, *label; int i, j, c, fd, mfd, count; int verbose, result, status, info, timeout, flags, show, munge, once; int xmit, code; unsigned int len; servers = getenv("KATCP_SERVER"); if(servers == NULL){ servers = "localhost:7147"; } once = 1; munge = 0; info = 1; verbose = 1; i = j = 1; app = argv[0]; timeout = 0; k = NULL; show = 1; parm = NULL; extra = NULL; label = KCPPAR_NAME; count = 0; px = NULL; k = create_katcl(STDOUT_FILENO); if(k == NULL){ fprintf(stderr, "%s: unable to create katcp message logic\n", app); return 4; } ss = create_set(); if(ss == NULL){ sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "unable to set up command set data structures"); return 4; } xmit = (-1); while (i < argc) { if (argv[i][0] == '-') { c = argv[i][j]; switch (c) { case 'h' : usage(app); return 0; case 'i' : info = 1 - info; j++; break; case 'm' : munge = 1; j++; break; case 'n' : show = 0; j++; break; case 'q' : verbose = 0; j++; break; case 'x' : xmit = 0; j++; break; case 'l' : case 's' : case 't' : j++; if (argv[i][j] == '\0') { j = 0; i++; } if (i >= argc) { sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "argument needs a parameter"); return 2; } switch(c){ case 'l' : label = argv[i] + j; break; case 's' : servers = argv[i] + j; break; case 't' : timeout = atoi(argv[i] + j); break; } i++; j = 1; break; case 'v' : verbose++; j++; break; case '-' : j++; break; case '\0': j = 1; i++; break; default: sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "unknown option -%c", argv[i][j]); return 2; } } else { copy = NULL; if(xmit < 0){ /* WARNING: this could make error detection worse */ xmit = 0; } if(xmit == 0){ px = create_referenced_parse_katcl(); if(px == NULL){ sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "unable to create parse instance for <%s ...>", argv[i]); 
return 4; } switch(argv[i][0]){ case KATCP_REQUEST : case KATCP_REPLY : case KATCP_INFORM : ptr = argv[i]; break; default : copy = malloc(strlen(argv[i]) + 1); if(copy == NULL){ sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "unable to allocate temporary storage for %s", argv[i]); return 4; } copy[0] = KATCP_REQUEST; strcpy(copy + 1, argv[i]); ptr = copy; break; } flags = KATCP_FLAG_FIRST; } else { ptr = argv[i]; flags = 0; } i++; j = 1; if((i >= argc) || (argv[i][0] == '-')){ flags |= KATCP_FLAG_LAST; } if(add_string_parse_katcl(px, flags, ptr) < 0){ sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "unable to add parameter %s", ptr); return 4; } if(flags & KATCP_FLAG_LAST){ #ifdef DEBUG fprintf(stderr, "par: loading command for servers %s\n", servers); #endif if(load_parse_set(ss, servers, px) < 0){ sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "unable to load command into server set %s", servers); return 4; } destroy_parse_katcl(px); px = NULL; } if(copy){ free(copy); copy = NULL; } xmit++; } } if(timeout == 0){ timeout = 5000 * ss->s_count; } gettimeofday(&start, NULL); delta.tv_sec = timeout / 1000; delta.tv_usec = (timeout % 1000) * 1000; add_time_katcp(&stop, &start, &delta); if(activate_remotes(ss, k) < 0){ sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "unable to initiate connections to remote servers"); return 3; } for(ss->s_finished = 0; ss->s_finished < ss->s_count;){ mfd = 0; FD_ZERO(&fsr); FD_ZERO(&fsw); if(k){ if(flushing_katcl(k)){ mfd = fileno_katcl(k); FD_SET(mfd, &fsw); } } for(i = 0; i < ss->s_count; i++){ rx = ss->s_vector[i]; if(rx->r_line){ fd = fileno_katcl(rx->r_line); if(fd > mfd){ mfd = fd; } } else { fd = (-1); /* WARNING: live dangerously */ } switch(rx->r_state){ case RX_SETUP : FD_SET(fd, &fsw); break; case RX_UP : if(flushing_katcl(rx->r_line)){ /* only write data if we have some */ FD_SET(fd, &fsw); } FD_SET(fd, &fsr); break; /* case RX_OK : */ /* case RX_FAIL : */ /* case RX_BAD : */ default : break; } } gettimeofday(&start, 
NULL); sub_time_katcp(&delta, &stop, &start); result = select(mfd + 1, &fsr, &fsw, NULL, &delta); switch(result){ case -1 : switch(errno){ case EAGAIN : case EINTR : continue; /* WARNING */ default : sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "select failed: %s", strerror(errno)); return 4; } break; case 0 : sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "requests timed out after %dms", timeout); /* could terminate cleanly here, but ... */ return 3; } if(k){ fd = fileno_katcl(k); if(FD_ISSET(fd, &fsw)){ write_katcl(k); /* WARNING: ignores write failures - unable to do much about it */ } } for(i = 0; i < ss->s_count; i++){ rx = ss->s_vector[i]; if(rx->r_line){ fd = fileno_katcl(rx->r_line); } else { fd = (-1); /* WARNING: live dangerously, will cause select to core dump if logic is incorrect */ } switch(rx->r_state){ case RX_SETUP : if(FD_ISSET(fd, &fsw)){ len = sizeof(int); result = getsockopt(fd, SOL_SOCKET, SO_ERROR, &code, &len); if(result == 0){ switch(code){ case 0 : if(verbose){ log_message_katcl(k, KATCP_LEVEL_DEBUG, label, "async connect to %s succeeded", rx->r_name); } if(next_request(rx) < 0){ log_message_katcl(k, KATCP_LEVEL_ERROR, label, "failed to load request for destination %s", rx->r_name); update_state(ss, rx, RX_BAD); } else { update_state(ss, rx, RX_UP); } break; case EINPROGRESS : log_message_katcl(k, KATCP_LEVEL_WARN, label, "saw an in progress despite write set being ready on job %s", rx->r_name); break; default : log_message_katcl(k, KATCP_LEVEL_ERROR, label, "unable to connect to %s: %s", rx->r_name, strerror(code)); update_state(ss, rx, RX_BAD); break; } } } break; case RX_UP : if(FD_ISSET(fd, &fsw)){ /* flushing things */ result = write_katcl(rx->r_line); if(result < 0){ log_message_katcl(k, KATCP_LEVEL_ERROR, label, "unable to write to %s: %s", rx->r_name, strerror(error_katcl(rx->r_line))); update_state(ss, rx, RX_BAD); } } if(FD_ISSET(fd, &fsr)){ /* get things */ result = read_katcl(rx->r_line); if(result){ if(result < 0){ 
log_message_katcl(k, KATCP_LEVEL_ERROR, label, "read from %s failed: %s", rx->r_name, strerror(error_katcl(rx->r_line))); } else { log_message_katcl(k, KATCP_LEVEL_WARN, label, "%s disconnected", rx->r_name); } } } while(have_katcl(rx->r_line) > 0){ /* compute */ cmd = arg_string_katcl(rx->r_line, 0); if(cmd){ #ifdef DEBUG fprintf(stderr, "reading message <%s ...>\n", cmd); #endif switch(cmd[0]){ case KATCP_INFORM : if(info){ if(show == 0){ if(!strcmp(KATCP_VERSION_CONNECT_INFORM, cmd)){ break; } if(!strcmp(KATCP_VERSION_INFORM, cmd)){ break; } if(!strcmp(KATCP_BUILD_STATE_INFORM, cmd)){ break; } } relay_katcl(rx->r_line, k); } break; case KATCP_REPLY : switch(cmd[1]){ case ' ' : case '\n' : case '\r' : case '\t' : case '\\' : case '\0' : log_message_katcl(k, KATCP_LEVEL_ERROR, label, "unreasonable response message from %s", rx->r_name); update_state(ss, rx, RX_BAD); break; default : ptr = cmd + 1; if(strcmp(ptr, rx->r_match)){ log_message_katcl(k, KATCP_LEVEL_ERROR, label, "downstream %s returned response %s which was never requested", rx->r_name, ptr); update_state(ss, rx, RX_BAD); } else { parm = arg_string_katcl(rx->r_line, 1); if(parm){ if(strcmp(parm, KATCP_OK) == 0){ count++; if(munge){ log_message_katcl(k, KATCP_LEVEL_INFO, label, "%s %s ok", rx->r_name, ptr); } if(verbose > 1){ log_message_katcl(k, KATCP_LEVEL_TRACE, label, "request %s to %s returned ok", ptr, rx->r_name); } result = next_request(rx); if(result){ if(result < 0){ sync_message_katcl(k, KATCP_LEVEL_ERROR, label, "unable to queue request %s to %s", ptr, rx->r_name); update_state(ss, rx, RX_BAD); } else { update_state(ss, rx, RX_OK); } } } else { if(munge){ extra = arg_string_katcl(rx->r_line, 2); log_message_katcl(k, KATCP_LEVEL_ERROR, label, "%s %s %s (%s)", rx->r_name, ptr, parm, extra ? extra : "no extra information"); } if(verbose > 0){ log_message_katcl(k, KATCP_LEVEL_ERROR, label, "downstream %s unable to process %s with status %s (%s)", rx->r_name, cmd, parm, extra ? 
extra : "no extra information"); } update_state(ss, rx, RX_FAIL); } } else { log_message_katcl(k, KATCP_LEVEL_ERROR, label, "response %s without status from %s", cmd, rx->r_name); update_state(ss, rx, RX_FAIL); } } break; } break; case KATCP_REQUEST : log_message_katcl(k, KATCP_LEVEL_WARN, label, "encountered unanswerable request %s", cmd); update_state(ss, rx, RX_BAD); break; default : if(once){ log_message_katcl(k, KATCP_LEVEL_WARN, label, "read malformed message %s from %s", cmd, rx->r_name); once = 1; } break; } } } break; /* case RX_OK : */ /* case RX_FAIL : */ /* case RX_BAD : */ default : break; } } } status = ss->s_status; destroy_set(ss); if(verbose){ if(status > 0){ log_message_katcl(k, KATCP_LEVEL_WARN, label, "command sequence failed after operation %d", count); } else { if(count > 0){ log_message_katcl(k, KATCP_LEVEL_INFO, label, "%d operations ok", count); } else { log_message_katcl(k, KATCP_LEVEL_INFO, label, "did nothing successfully"); } } } /* flush, allows us to get away with deferring writes to stdout */ while(write_katcl(k) == 0); destroy_katcl(k, 0); return status; }
static void sunxi_ep0_do_control_data(struct sunxi_otgc *otgc, const struct sunxi_otgc_event_depevt *event) { struct sunxi_otgc_ep *dep; struct sunxi_otgc_request *req; int ret; dep = otgc->eps[0]; if (list_empty(&dep->request_list)) { dev_vdbg(otgc->dev, "pending request for EP0 Data phase\n"); dep->flags |= SUNXI_EP_PENDING_REQUEST; if (event->endpoint_number) dep->flags |= SUNXI_EP0_DIR_IN; return; } req = next_request(&dep->request_list); req->direction = !!event->endpoint_number; if (req->request.length == 0) { ret = sunxi_ep0_start_trans(otgc, event->endpoint_number, otgc->ctrl_req_addr, 0, SUNXI_TRBCTL_CONTROL_DATA); } else if ((req->request.length % dep->endpoint.maxpacket) && (event->endpoint_number == 0)) { ret = usb_gadget_map_request(&otgc->gadget, &req->request, event->endpoint_number); if (ret) { dev_dbg(otgc->dev, "failed to map request\n"); return; } WARN_ON(req->request.length > dep->endpoint.maxpacket); otgc->ep0_bounced = true; /* * REVISIT in case request length is bigger than EP0 * wMaxPacketSize, we will need two chained TRBs to handle * the transfer. */ ret = sunxi_ep0_start_trans(otgc, event->endpoint_number, otgc->ep0_bounce_addr, dep->endpoint.maxpacket, SUNXI_TRBCTL_CONTROL_DATA); } else { ret = usb_gadget_map_request(&otgc->gadget, &req->request, event->endpoint_number); if (ret) { dev_dbg(otgc->dev, "failed to map request\n"); return; } ret = sunxi_ep0_start_trans(otgc, event->endpoint_number, req->request.dma, req->request.length, SUNXI_TRBCTL_CONTROL_DATA); } WARN_ON(ret < 0); }
/*
 * Read callback for a roaming-service HTTP response.
 *
 * State 0 parses response headers line by line (capturing Content-Length and
 * Location) until the blank line; state 1 reads the body. A Location header
 * containing "scheme://host/..." means we were redirected: the hostname is
 * extracted and the request will be retried against it. Otherwise the body is
 * processed according to the request type. Returns early on G_IO_STATUS_AGAIN
 * so the callback can resume when more data arrives.
 */
static void
read_cb (PnNode *conn,
         gpointer data)
{
    RoamingRequest *roaming_request;
    GIOStatus status = G_IO_STATUS_NORMAL;
    gchar *str = NULL;
    gboolean got_hostname = FALSE;

    roaming_request = data;

    while (roaming_request->parser_state == 0)
    {
        gsize terminator_pos;

        status = pn_parser_read_line (roaming_request->parser, &str, NULL, &terminator_pos, NULL);

        if (status == G_IO_STATUS_AGAIN)
            return;

        if (status != G_IO_STATUS_NORMAL)
            goto leave;

        if (str)
        {
            str[terminator_pos] = '\0';

            if (strncmp (str, "Content-Length: ", 16) == 0)
                roaming_request->content_size = atoi(str + 16);

            if (strncmp (str, "Location: ", 10) == 0)
                roaming_request->location = g_strdup (str + 10);

            /* now comes the content */
            if (str[0] == '\0')
            {
                roaming_request->parser_state++;
                /* BUGFIX: str leaked on this break path before */
                g_free (str);
                str = NULL;
                break;
            }

            g_free (str);
            str = NULL;
        }
    }

    if (roaming_request->parser_state == 1)
    {
        gchar *body = NULL;

        status = pn_parser_read (roaming_request->parser, &body, roaming_request->content_size, NULL);

        if (status == G_IO_STATUS_AGAIN)
            return;

        if (status != G_IO_STATUS_NORMAL)
            goto leave;

        if (roaming_request->location != NULL)
        {
            gchar *cur;

            /*
             * BUGFIX: the old code did strstr(...) + 3 before the NULL check,
             * which is undefined behaviour when "://" is absent and made the
             * check itself useless.
             */
            cur = strstr (roaming_request->location, "://");

            if (cur)
            {
                gchar *end;

                cur += 3;
                end = strchr (cur, '/');

                g_free (roaming_request->roaming_session->hostname);
                /* BUGFIX: a Location without a trailing path left end NULL,
                 * producing a nonsense length; take the whole tail instead */
                roaming_request->roaming_session->hostname =
                    end ? g_strndup (cur, end - cur) : g_strdup (cur);

                got_hostname = TRUE;

                g_free (body); /* BUGFIX: body leaked on this path before */
                goto leave;
            }
        }

        pn_debug ("%s", body);

        if (roaming_request->type == PN_GET_PROFILE)
        {
            /* initialise in case the parser leaves the out-param untouched */
            char *cachekey = NULL;

            pn_parse_xml_tag (body, "CacheKey", &cachekey);

            if (cachekey)
            {
                g_free (roaming_request->roaming_session->cachekey);
                roaming_request->roaming_session->cachekey = cachekey;
            }

            process_get_profile (roaming_request, body);
        }
        /* else if (roaming_request->type == PN_UPDATE_PROFILE) */

        g_free(body);
    }

leave:
    pn_node_close (conn);
    next_request (roaming_request->roaming_session, got_hostname);
}
static void dwc3_ep0_complete_data(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct dwc3_request *r = NULL; struct usb_request *ur; struct dwc3_trb *trb; struct dwc3_ep *ep0; u32 transferred; u32 status; u32 length; u8 epnum; epnum = event->endpoint_number; ep0 = dwc->eps[0]; dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS; trb = dwc->ep0_trb; r = next_request(&ep0->request_list); if (!r) return; status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (status == DWC3_TRBSTS_SETUP_PENDING) { dev_dbg(dwc->dev, "Setup Pending received\n"); if (r) dwc3_gadget_giveback(ep0, r, -ECONNRESET); return; } if (dwc->ep0_zlp_sent) goto finish_zlp; ur = &r->request; length = trb->size & DWC3_TRB_SIZE_MASK; if (dwc->ep0_bounced) { unsigned transfer_size = ur->length; unsigned maxp = ep0->endpoint.maxpacket; transfer_size += (maxp - (transfer_size % maxp)); transferred = min_t(u32, ur->length, transfer_size - length); memcpy(ur->buf, dwc->ep0_bounce, transferred); } else { transferred = ur->length - length; } ur->actual += transferred; if ((epnum & 1) && ur->actual < ur->length) { /* for some reason we did not get everything out */ dwc3_ep0_stall_and_restart(dwc); return; } /* handle the case where we have to send a zero packet */ if ((epnum & 1) && ur->zero && (ur->length % ep0->endpoint.maxpacket == 0)) { int ret; ret = dwc3_ep0_start_trans(dwc, epnum, dwc->ctrl_req_addr, 0, DWC3_TRBCTL_CONTROL_DATA); WARN_ON(ret < 0); dwc->ep0_zlp_sent = 1; return; } finish_zlp: dwc3_gadget_giveback(ep0, r, 0); }
/*
 * Completion handler for the DATA phase of an EP0 control transfer
 * (chained-TRB variant).
 *
 * Works out how many bytes were actually moved from the residual count in
 * the EP0 TRB(s). Bounced OUT transfers longer than DWC3_EP0_BOUNCE_SIZE
 * were split across two TRBs, so the first TRB is accounted for before the
 * bounce-buffer copy. Short IN transfers stall and restart EP0; an IN
 * transfer that is an exact multiple of maxpacket with ur->zero set gets a
 * trailing zero-length packet queued after giveback.
 */
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r = NULL;
	struct usb_request *ur;
	struct dwc3_trb *trb;
	struct dwc3_ep *ep0;
	unsigned transfer_size = 0;
	unsigned maxp;
	unsigned remaining_ur_length;
	void *buf;
	u32 transferred = 0;
	u32 status;
	u32 length;
	u8 epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	/* after the data phase the next expected event is the status phase */
	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;

	trb = dwc->ep0_trb;

	trace_dwc3_complete_trb(ep0, trb);

	r = next_request(&ep0->request_list);
	if (!r)
		return;

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING) {
		/* host already sent a new SETUP: abort the current request */
		dwc->setup_packet_pending = true;

		dwc3_trace(trace_dwc3_ep0, "Setup Pending received");

		/* NOTE(review): r is non-NULL here (early return above), so
		 * this guard is redundant; kept as-is */
		if (r)
			dwc3_gadget_giveback(ep0, r, -ECONNRESET);

		return;
	}

	ur = &r->request;
	buf = ur->buf;
	remaining_ur_length = ur->length;

	/* residual byte count left in the TRB by the controller */
	length = trb->size & DWC3_TRB_SIZE_MASK;

	maxp = ep0->endpoint.maxpacket;

	if (dwc->ep0_bounced) {
		/*
		 * Handle the first TRB before handling the bounce buffer if
		 * the request length is greater than the bounce buffer size
		 */
		if (ur->length > DWC3_EP0_BOUNCE_SIZE) {
			/* first TRB covered the maxpacket-aligned head of the
			 * buffer directly; account for it and step to the
			 * second (bounce) TRB */
			transfer_size = ALIGN(ur->length - maxp, maxp);
			transferred = transfer_size - length;
			buf = (u8 *)buf + transferred;
			ur->actual += transferred;
			remaining_ur_length -= transferred;

			trb++;
			length = trb->size & DWC3_TRB_SIZE_MASK;

			ep0->free_slot = 0;
		}

		/* the bounce TRB was programmed rounded up to maxpacket */
		transfer_size = roundup((ur->length - transfer_size), maxp);
		transferred = min_t(u32, remaining_ur_length,
				    transfer_size - length);
		memcpy(buf, dwc->ep0_bounce, transferred);
	} else {
		transferred = ur->length - length;
	}

	ur->actual += transferred;

	if ((epnum & 1) && ur->actual < ur->length) {
		/* for some reason we did not get everything out */
		dwc3_ep0_stall_and_restart(dwc);
	} else {
		dwc3_gadget_giveback(ep0, r, 0);

		/* IN transfer of an exact multiple of maxpacket with the zero
		 * flag set: queue a trailing zero-length packet */
		if (IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
				ur->length && ur->zero) {
			int ret;

			dwc->ep0_next_event = DWC3_EP0_COMPLETE;

			ret = dwc3_ep0_start_trans(dwc, epnum,
					dwc->ctrl_req_addr, 0,
					DWC3_TRBCTL_CONTROL_DATA, false);
			WARN_ON(ret < 0);
		}
	}
}