/*
 * Dump every URB queued on @list into the text buffer @buf, writing at
 * most @max bytes.  Each URB is printed as one line:
 *   "\turb <ptr> dev<N> ep<N><in|out>-<type> <actual>/<total>\n"
 *
 * NOTE(review): this chunk is truncated — the trailing "return count;"
 * and closing brace of the function lie outside the visible source.
 */
static int dump_urbs(struct list_head *list, char *buf, unsigned max)
{
	int count = 0;
	int tmp;
	struct urb *urb;

	/* Empty queue gets a single marker line. */
	if (list_empty(list))
		return snprintf(buf, max, "\t(queue empty)\n");

	list_for_each_entry(urb, list, urb_list) {
		const unsigned pipe = urb->pipe;

		/* for non-multipoint, urb->dev never changes */
		tmp = snprintf(buf, max, "\turb %p dev%d ep%d%s-%s %d/%d\n",
			urb, urb->dev->devnum,
			usb_pipeendpoint(pipe),
			usb_pipein(pipe) ? "in" : "out",
			/* GNU statement expression mapping pipe type to a
			 * short human-readable tag */
			({
				char *s;
				switch (usb_pipetype (pipe)) {
				case PIPE_BULK:
					s = "bulk"; break;
				case PIPE_INTERRUPT:
					s = "int"; break;
				case PIPE_CONTROL:
					s = "control"; break;
				default:
					s = "iso"; break;
				};
				s;
			}),
			urb->actual_length,
			urb->transfer_buffer_length)
			;
		/* snprintf error: stop dumping. */
		if (tmp < 0)
			break;
		/* Clamp to the space actually available, then advance. */
		tmp = min(tmp, (int)max);
		count += tmp;
		buf += tmp;
		max -= tmp;
	}
/*
 * Log the interesting fields of @urb through the device's dev_dbg
 * channel for usbip debugging.  Tolerates a NULL urb or a NULL
 * urb->dev by logging a marker and returning early.  For control
 * transfers the setup packet is decoded as well.
 */
void usbip_dump_urb(struct urb *urb)
{
	struct device *dev;

	if (!urb) {
		pr_debug("urb: null pointer!!\n");
		return;
	}

	if (!urb->dev) {
		pr_debug("urb->dev: null pointer!!\n");
		return;
	}

	/* Use the USB device's struct device for all subsequent output. */
	dev = &urb->dev->dev;

	usbip_dump_usb_device(urb->dev);

	dev_dbg(dev, " pipe :%08x ", urb->pipe);
	usbip_dump_pipe(urb->pipe);

	dev_dbg(dev, " status :%d\n", urb->status);
	dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags);
	dev_dbg(dev, " transfer_buffer_length:%d\n",
		urb->transfer_buffer_length);
	dev_dbg(dev, " actual_length :%d\n", urb->actual_length);

	/* Only control URBs carry a meaningful setup packet. */
	if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
		usbip_dump_usb_ctrlrequest(
			(struct usb_ctrlrequest *)urb->setup_packet);

	dev_dbg(dev, " start_frame :%d\n", urb->start_frame);
	dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets);
	dev_dbg(dev, " interval :%d\n", urb->interval);
	dev_dbg(dev, " error_count :%d\n", urb->error_count);
}
/*
 * Queue an URB to the ASL or PZL.
 *
 * Interrupt URBs go on the periodic zone list (PZL); control, bulk and
 * every other type go on the asynchronous schedule list (ASL).
 * Isochronous transfers are not implemented and are rejected with
 * -ENOTSUPP after logging an error.
 *
 * Fix: removed the stray ';' that followed the switch statement's
 * closing brace (harmless null statement, but a checkpatch warning).
 */
static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
			   gfp_t mem_flags)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	struct whc *whc = wusbhc_to_whc(wusbhc);
	int ret;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_INTERRUPT:
		ret = pzl_urb_enqueue(whc, urb, mem_flags);
		break;
	case PIPE_ISOCHRONOUS:
		dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
		ret = -ENOTSUPP;
		break;
	case PIPE_CONTROL:
	case PIPE_BULK:
	default:
		ret = asl_urb_enqueue(whc, urb, mem_flags);
		break;
	}

	return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE: control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 */
static int ehci_urb_enqueue (
	struct usb_hcd	*hcd,
	struct urb	*urb,
	gfp_t		mem_flags
) {
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct list_head	qtd_list;

	INIT_LIST_HEAD (&qtd_list);

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_CONTROL:
		/* qh_completions() code doesn't handle all the fault cases
		 * in multi-TD control transfers. Even 1KB is rare anyway.
		 */
		if (urb->transfer_buffer_length > (16 * 1024))
			return -EMSGSIZE;
		/* FALLTHROUGH */
	/* case PIPE_BULK: */
	default:
		/* Control and bulk both go on the async schedule. */
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async(ehci, urb, &qtd_list, mem_flags);

	case PIPE_INTERRUPT:
		/* Interrupt transfers use the periodic schedule. */
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit(ehci, urb, &qtd_list, mem_flags);

	case PIPE_ISOCHRONOUS:
		/* High-speed iso uses iTDs; full/low speed through a TT
		 * uses siTDs. */
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit (ehci, urb, mem_flags);
		else
			return sitd_submit (ehci, urb, mem_flags);
	}
}
/*
 * Run one transfer on the DWC OTG host controller, using the channel
 * whose number equals the endpoint number.
 *
 * If @req is non-NULL this is a control transfer and is executed in up
 * to three stages: SETUP, optional DATA (when @length != 0), and
 * STATUS.  If @req is NULL this is a bulk transfer consisting of the
 * DATA stage only.  Each stage programs HCTSIZ/HCCHAR, enables the
 * channel via dwc_init_channel() and busy-waits for completion in
 * dwc_wait_for_complete().
 *
 * Returns HOST_OK on success, HOST_ERR on any stage failure.
 */
static int ehci_submit_async(struct usb_device *dev, unsigned long pipe,
			     void *buffer, int length, struct devrequest *req)
{
	struct dwc_ctrl *ctrl = dev->controller;
	pUSB_OTG_REG otgReg = ctrl->otgReg;
	uint32_t channel_num = usb_pipeendpoint(pipe);
	uint32_t eptype;
	HOST_RET ret = HOST_OK;
	HCTSIZ_DATA hctsiz;
	HCCHAR_DATA hcchar;
	uint32_t hcStat;
	uint32_t errCnt;
	uint32_t packet_size;
	uint32_t datatoggle;

	eptype = usb_pipetype(pipe);
	// ep type definition is different in pipe and dwc hcchar
	// (pipe: 2 = control, 3 = bulk; hcchar: 0 = control, 2 = bulk)
	if(eptype == 2 )
		eptype = 0;
	else if (eptype == 3)
		eptype = 2;

	debug("ehci_submit_async channel pipe %lx req %p, len %x\n",
	      pipe, req, length);

	/* NOTE(review): max packet size is hard-coded — 0x200 for bulk,
	 * 0x40 for control — rather than taken from the endpoint
	 * descriptor; presumably high-speed only.  TODO confirm. */
	if(req == NULL)
		packet_size = 0x200;
	else
		packet_size = 0x40;

	if (req != NULL) { // setup for control
		/* SETUP stage: one 8-byte SETUP packet, always OUT. */
		hctsiz.d32 = 0;
		hctsiz.b.pid = DWC_HCTSIZ_SETUP;
		hctsiz.b.pktcnt = 1;
		hctsiz.b.xfersize = 8;

		hcchar.d32 = 0;
		hcchar.b.mps = packet_size;
		hcchar.b.epnum = channel_num; // use the same channel number as endpoint number
		hcchar.b.epdir = DWC_EPDIR_OUT;
		hcchar.b.eptype = eptype;
		hcchar.b.multicnt = 1;
		hcchar.b.devaddr = usb_pipedevice(pipe);
		hcchar.b.chdis = 0;
		hcchar.b.chen = 1;

		hcStat = HCSTAT_SETUP;
		errCnt = 0;
		dwc_init_channel(dev, hctsiz.d32, hcchar, (uint32_t)req);
		if(dwc_wait_for_complete(dev, channel_num, &hcStat, &errCnt)){
			ret = HOST_ERR;
			goto out;
		}
		if(hcStat != HCSTAT_DONE){
			ret = HOST_ERR;
			goto out;
		}
	}

	if (length || (req == NULL)) { // data for bulk & control
		/* Control DATA stage always starts at DATA1; bulk keeps a
		 * per-direction toggle in the controller state. */
		if(req)
			datatoggle = DWC_HCTSIZ_DATA1;
		else
			datatoggle = ctrl->datatoggle[usb_pipein(pipe)];
		debug("dwc_hcd data len %x toggle %x\n", length, datatoggle);
		hctsiz.d32 = 0;
		hctsiz.b.pid = datatoggle;
		hctsiz.b.pktcnt = (length+packet_size - 1)/packet_size;
		hctsiz.b.xfersize = length;

		hcchar.d32 = 0;
		hcchar.b.mps = packet_size;
		hcchar.b.epnum = channel_num; // use the same channel number as endpoint number
		/* NOTE(review): for control (req != NULL) the data stage is
		 * forced to IN regardless of the pipe direction; OUT data
		 * stages would be mishandled — TODO confirm intended. */
		hcchar.b.epdir = (req == NULL) ?
			usb_pipein(pipe) : DWC_EPDIR_IN;
		hcchar.b.eptype = eptype;
		hcchar.b.multicnt = 1;
		hcchar.b.devaddr = usb_pipedevice(pipe);
		hcchar.b.chdis = 0;
		hcchar.b.chen = 1;

		hcStat = HCSTAT_DATA;
		errCnt = 0;
		/* Bulk only: an odd packet count flips the saved toggle
		 * (DATA0 <-> DATA1) for the next transfer. */
		if((req == NULL)&&(hctsiz.b.pktcnt&0x01)) {
			ctrl->datatoggle[usb_pipein(pipe)] ^= 0x02;
		}
		dwc_init_channel(dev, hctsiz.d32, hcchar, (uint32_t)buffer);
		if(dwc_wait_for_complete(dev, channel_num, &hcStat, &errCnt)){
			ret = HOST_ERR;
			goto out;
		}
		/* A STALL clears the endpoint toggle back to DATA0. */
		if(hcStat == HCSTAT_STALL)
			ctrl->datatoggle[usb_pipein(pipe)] = 0;
	}

	if (req != NULL) { // status for control
		/* STATUS stage: zero-length DATA1 packet, direction is the
		 * opposite of the data stage (OUT if there was data). */
		debug("status len %x\n", length);
		hctsiz.d32 = 0;
		hctsiz.b.dopng = 0;
		hctsiz.b.pid = DWC_HCTSIZ_DATA1;
		hctsiz.b.pktcnt = 1;
		hctsiz.b.xfersize = 0;

		hcchar.d32 = 0;
		hcchar.b.mps = packet_size;
		hcchar.b.epnum = channel_num; // use the same channel number as endpoint number
		hcchar.b.epdir = (length) ? DWC_EPDIR_OUT : DWC_EPDIR_IN;
		hcchar.b.eptype = eptype;
		hcchar.b.multicnt = 1;
		hcchar.b.devaddr = usb_pipedevice(pipe);
		hcchar.b.chdis = 0;
		hcchar.b.chen = 1;

		hcStat = HCSTAT_DATA;
		errCnt = 0;
		dwc_init_channel(dev, hctsiz.d32, hcchar, 0);
		if(dwc_wait_for_complete(dev, channel_num, &hcStat, &errCnt)){
			ret = HOST_ERR;
			goto out;
		}
	}
out:
	/* On failure, dump the channel registers for diagnosis. */
	if(ret){
		debug("dwc_init channel hcziz %x, hcdma %x, hcchar %x\n",
		      otgReg->Host.hchn[channel_num].hctsizn,
		      otgReg->Host.hchn[channel_num].hcdman,
		      otgReg->Host.hchn[channel_num].hccharn);
	}
	/* NOTE(review): act_len is reported as the full requested length
	 * and status is cleared to 0 even when ret == HOST_ERR — callers
	 * cannot see partial transfers or failures here; TODO confirm. */
	dev->act_len = length;
	dev->status = 0;
	return (ret);
}
/** Initializes a QH structure. * * @param[in] _hcd The HCD state structure for the DWC OTG controller. * @param[in] _qh The QH to init. * @param[in] _urb Holds the information about the device/endpoint that we need * to initialize the QH. */ #define SCHEDULE_SLOP 10 #define SCHEDULE_SPLIT_SLOP 10 /* 1 == 125us, 10 -> 1.25ms, 20 -> 2.5ms, */ void dwc_otg_hcd_qh_init(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh, struct urb *_urb) { memset(_qh, 0, sizeof(dwc_otg_qh_t)); /* Initialize QH */ switch (usb_pipetype(_urb->pipe)) { case PIPE_CONTROL: _qh->ep_type = USB_ENDPOINT_XFER_CONTROL; break; case PIPE_BULK: _qh->ep_type = USB_ENDPOINT_XFER_BULK; break; case PIPE_ISOCHRONOUS: _qh->ep_type = USB_ENDPOINT_XFER_ISOC; break; case PIPE_INTERRUPT: _qh->ep_type = USB_ENDPOINT_XFER_INT; break; } _qh->ep_is_in = usb_pipein(_urb->pipe) ? 1 : 0; _qh->data_toggle = DWC_OTG_HC_PID_DATA0; _qh->maxp = usb_maxpacket(_urb->dev, _urb->pipe, !(usb_pipein(_urb->pipe))); INIT_LIST_HEAD(&_qh->qtd_list); INIT_LIST_HEAD(&_qh->qh_list_entry); _qh->channel = NULL; /* FS/LS Enpoint on HS Hub * NOT virtual root hub */ _qh->do_split = 0; if (((_urb->dev->speed == USB_SPEED_LOW) || (_urb->dev->speed == USB_SPEED_FULL)) && (_urb->dev->tt) && (_urb->dev->tt->hub) && (_urb->dev->tt->hub->devnum != 1)) { DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n", usb_pipeendpoint(_urb->pipe), _urb->dev->tt->hub->devnum, _urb->dev->ttport); _qh->do_split = 1; } if (_qh->ep_type == USB_ENDPOINT_XFER_INT || _qh->ep_type == USB_ENDPOINT_XFER_ISOC) { /* Compute scheduling parameters once and save them. */ hprt0_data_t hprt; /** @todo Account for split transfers in the bus time. 
*/ int bytecount = dwc_hb_mult(_qh->maxp) * dwc_max_packet(_qh->maxp); int usecs = /*FIXME: hardcode to highspeed, to fix Full/Low speed device via Hub*/ usb_calc_bus_time(/*_urb->dev->speed*/USB_SPEED_HIGH, usb_pipein(_urb->pipe), (_qh->ep_type == USB_ENDPOINT_XFER_ISOC), bytecount); _qh->usecs = NS_TO_US(usecs); /* Start in a slightly future (micro)frame. */ _qh->sched_frame = dwc_frame_num_inc(_hcd->frame_number, SCHEDULE_SLOP); _qh->interval = _urb->interval; #if 0 /* Increase interrupt polling rate for debugging. */ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) { _qh->interval = 8; } #endif hprt.d32 = dwc_read_reg32(_hcd->core_if->host_if->hprt0); if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) && ((_urb->dev->speed == USB_SPEED_LOW) || (_urb->dev->speed == USB_SPEED_FULL))) { _qh->interval *= 8; _qh->sched_frame |= 0x7; _qh->start_split_frame = _qh->sched_frame; } }else{ if(_qh->do_split){ _qh->interval = SCHEDULE_SPLIT_SLOP; _qh->sched_frame = dwc_frame_num_inc(_hcd->frame_number, _qh->interval); }; } DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n"); DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", _qh); DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n", _urb->dev->devnum); DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n", usb_pipeendpoint(_urb->pipe), usb_pipein(_urb->pipe) == USB_DIR_IN ? "IN" : "OUT"); DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", ( { char *speed; switch(_urb->dev->speed) { case USB_SPEED_LOW: speed = "low"; break; case USB_SPEED_FULL: speed = "full"; break; case USB_SPEED_HIGH: speed = "high"; break; default: speed = "?"; break;}; speed;})) ;
/*
 * Initialize a QH from the device/endpoint information carried by @urb:
 * transfer type, direction, max packet size, split-transaction need,
 * and (for periodic endpoints) the bandwidth/scheduling parameters.
 */
void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb)
{
	char *speed, *type;

	memset (qh, 0, sizeof (dwc_otg_qh_t));

	/* Initialize QH */
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		qh->ep_type = USB_ENDPOINT_XFER_CONTROL;
		break;
	case PIPE_BULK:
		qh->ep_type = USB_ENDPOINT_XFER_BULK;
		break;
	case PIPE_ISOCHRONOUS:
		qh->ep_type = USB_ENDPOINT_XFER_ISOC;
		break;
	case PIPE_INTERRUPT:
		qh->ep_type = USB_ENDPOINT_XFER_INT;
		break;
	}

	qh->ep_is_in = usb_pipein(urb->pipe) ? 1 : 0;
	qh->data_toggle = DWC_OTG_HC_PID_DATA0;
	qh->maxp = usb_maxpacket(urb->dev, urb->pipe, !(usb_pipein(urb->pipe)));
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);
	qh->channel = NULL;

	/* FS/LS Enpoint on HS Hub
	 * NOT virtual root hub */
	qh->do_split = 0;
	/* Full/low-speed device behind a non-root high-speed hub needs
	 * split transactions through the hub's TT. */
	if (((urb->dev->speed == USB_SPEED_LOW) ||
	     (urb->dev->speed == USB_SPEED_FULL)) &&
	    (urb->dev->tt) && (urb->dev->tt->hub) &&
	    (urb->dev->tt->hub->devnum != 1)) {
		DWC_DEBUGPL(DBG_HCD,
			    "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			    usb_pipeendpoint(urb->pipe),
			    urb->dev->tt->hub->devnum, urb->dev->ttport);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them. */
		hprt0_data_t hprt;

		/** @todo Account for split transfers in the bus time. */
		int bytecount =
		    dwc_hb_mult(qh->maxp) * dwc_max_packet(qh->maxp);
		/* FIXME: work-around patch by Steven */
		qh->usecs = NS_TO_US(usb_calc_bus_time(urb->dev->speed,
						       usb_pipein(urb->pipe),
						       (qh->ep_type == USB_ENDPOINT_XFER_ISOC),
						       bytecount));
		/* Start in a slightly future (micro)frame. */
		qh->sched_frame = dwc_frame_num_inc(hcd->frame_number,
						    SCHEDULE_SLOP);
		qh->interval = urb->interval;
#if 0
		/* Increase interrupt polling rate for debugging. */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
			qh->interval = 8;
		}
#endif
		hprt.d32 = dwc_read_reg32(hcd->core_if->host_if->hprt0);
		/* FS/LS device on a HS port: convert frame-based interval to
		 * microframes (x8) and align split scheduling. */
		if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) &&
		    ((urb->dev->speed == USB_SPEED_LOW) ||
		     (urb->dev->speed == USB_SPEED_FULL))) {
			qh->interval *= 8;
			qh->sched_frame |= 0x7;
			qh->start_split_frame = qh->sched_frame;
		}
	}

	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n");
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", qh);
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n",
		    urb->dev->devnum);
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n",
		    usb_pipeendpoint(urb->pipe),
		    usb_pipein(urb->pipe) == USB_DIR_IN ? "IN" : "OUT");

	switch(urb->dev->speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n",type);

#ifdef DEBUG
	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n",
			    qh->usecs);
		DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n",
			    qh->interval);
	}
#endif
	qh->dw_align_buf = NULL;
	return;
}
/*
 * Transfer up to @len bytes on @pipe in hardware-sized chunks on the
 * single DWC2 host channel, updating the PID toggle through *@pid.
 * OUT data is staged through the DMA-aligned bounce buffer; IN data is
 * copied back out of it.  dev->act_len is set to the bytes actually
 * moved and dev->status is cleared.
 *
 * Fixes:
 *  - "&regs" had been mangled to the mojibake "(R)s" in the hc_regs
 *    initializer; restored to take the address of the channel registers.
 *  - the OUT-path memcpy copied @len bytes on every loop iteration,
 *    overrunning aligned_buffer (sized DWC2_DATA_BUF_SIZE) and reading
 *    past the source once done > 0; it now copies only the current
 *    chunk (xfer_len bytes), matching what the IN path copies back.
 */
int chunk_msg(struct usb_device *dev, unsigned long pipe, int *pid, int in,
	      void *buffer, int len, bool ignore_ack)
{
	struct dwc2_hc_regs *hc_regs = &regs->hc_regs[DWC2_HC_CHANNEL];
	int devnum = usb_pipedevice(pipe);
	int ep = usb_pipeendpoint(pipe);
	int max = usb_maxpacket(dev, pipe);
	int eptype = dwc2_eptype[usb_pipetype(pipe)];
	int done = 0;
	int ret = 0;
	uint32_t sub;
	uint32_t xfer_len;
	uint32_t num_packets;
	int stop_transfer = 0;

	debug("%s: msg: pipe %lx pid %d in %d len %d\n", __func__, pipe,
	      *pid, in, len);

	do {
		/* Initialize channel */
		dwc_otg_hc_init(regs, DWC2_HC_CHANNEL, dev, devnum, ep, in,
				eptype, max);

		/* Clamp the chunk to the controller and bounce-buffer
		 * limits, rounded so a whole number of packets fits. */
		xfer_len = len - done;
		if (xfer_len > CONFIG_DWC2_MAX_TRANSFER_SIZE)
			xfer_len = CONFIG_DWC2_MAX_TRANSFER_SIZE - max + 1;
		if (xfer_len > DWC2_DATA_BUF_SIZE)
			xfer_len = DWC2_DATA_BUF_SIZE - max + 1;

		/* Make sure that xfer_len is a multiple of max packet size. */
		if (xfer_len > 0) {
			num_packets = (xfer_len + max - 1) / max;
			if (num_packets > CONFIG_DWC2_MAX_PACKET_COUNT) {
				num_packets = CONFIG_DWC2_MAX_PACKET_COUNT;
				xfer_len = num_packets * max;
			}
		} else {
			/* Zero-length transfer still needs one packet. */
			num_packets = 1;
		}

		/* IN transfers must request whole packets. */
		if (in)
			xfer_len = num_packets * max;

		debug("%s: chunk: pid %d xfer_len %u pkts %u\n", __func__,
		      *pid, xfer_len, num_packets);

		writel((xfer_len << DWC2_HCTSIZ_XFERSIZE_OFFSET) |
		       (num_packets << DWC2_HCTSIZ_PKTCNT_OFFSET) |
		       (*pid << DWC2_HCTSIZ_PID_OFFSET),
		       &hc_regs->hctsiz);

		/* Stage only this chunk of the OUT data (was: len bytes,
		 * which overran the bounce buffer when len was large and
		 * re-read from offset `done` past the source end). */
		if (!in)
			memcpy(aligned_buffer, (char *)buffer + done, xfer_len);

		writel(phys_to_bus((unsigned long)aligned_buffer),
		       &hc_regs->hcdma);

		/* Set host channel enable after all other setup is
		 * complete. */
		clrsetbits_le32(&hc_regs->hcchar, DWC2_HCCHAR_MULTICNT_MASK |
				DWC2_HCCHAR_CHEN | DWC2_HCCHAR_CHDIS,
				(1 << DWC2_HCCHAR_MULTICNT_OFFSET) |
				DWC2_HCCHAR_CHEN);

		ret = wait_for_chhltd(&sub, pid, ignore_ack);
		if (ret)
			break;

		if (in) {
			/* sub = bytes NOT transferred; a short packet ends
			 * the whole transfer. */
			xfer_len -= sub;
			memcpy(buffer + done, aligned_buffer, xfer_len);
			if (sub)
				stop_transfer = 1;
		}

		done += xfer_len;
	} while ((done < len) && !stop_transfer);

	/* Quiesce channel interrupts and ack anything pending. */
	writel(0, &hc_regs->hcintmsk);
	writel(0xFFFFFFFF, &hc_regs->hcint);

	dev->status = 0;
	dev->act_len = done;

	return ret;
}
/*
 * Enqueue an URB on the virtual host controller.  Requests addressed to
 * devnum 0 (default address) are intercepted: SET_ADDRESS is completed
 * locally, GET_DESCRIPTOR(device) is passed through, anything else is
 * rejected.  Everything else is forwarded to the DDE USB backend;
 * currently only control transfers are actually submitted — interrupt,
 * bulk and isochronous are rejected with -EINVAL.
 *
 * NOTE(review): on the no_need_xmit paths the computed error in `ret`
 * is discarded and 0 is returned after giving the URB back — callers
 * see success; presumably intentional for this stub, verify.
 */
static int vhcd_urb_enqueue(struct usb_hcd *hcd,
                            struct usb_host_endpoint *ep,
                            struct urb *urb,
                            gfp_t mem_flags)
{
	int ret = 0;
	unsigned int transfer_flags = 0 ;
	struct usb_device * udev = urb->dev;

	/* FIXME Check for non existent device */

	if (!HC_IS_RUNNING(hcd->state)) {
		LOG("HC is not running\n");
		return -ENODEV;
	}

	/* we have to trap some control messages, i.e. USB_REQ_SET_ADDRESS... */
	/* TODO we don't have to do it here, but in the server */
	if (usb_pipedevice(urb->pipe) == 0) {
		__u8 type = usb_pipetype(urb->pipe);
		struct usb_ctrlrequest *ctrlreq =
			(struct usb_ctrlrequest *) urb->setup_packet;

		/* Only control requests are legal at the default address. */
		if (type != PIPE_CONTROL || !ctrlreq ) {
			LOG("invalid request to devnum 0\n");
			ret = -EINVAL;
			goto no_need_xmit;
		}

		switch (ctrlreq->bRequest) {

		case USB_REQ_SET_ADDRESS:
			/* Complete locally — the virtual HC tracks ports by
			 * number, no real address assignment needed. */
			LOG("SetAddress Request (%d) to port %d\n",
			    ctrlreq->wValue, urb->dev->portnum);
			spin_lock (&urb->lock);
			if (urb->status == -EINPROGRESS) {
				/* This request is successfully completed. */
				/* If not -EINPROGRESS, possibly unlinked. */
				urb->status = 0;
			}
			spin_unlock (&urb->lock);
			goto no_need_xmit;

		case USB_REQ_GET_DESCRIPTOR:
			if (ctrlreq->wValue == (USB_DT_DEVICE << 8))
				LOG("Get_Descriptor to device 0 (get max pipe size)\n");
			/* Pass the descriptor fetch through to the backend. */
			goto out;

		default:
			/* NOT REACHED */
			LOG("invalid request to devnum 0 bRequest %u, wValue %u\n",
			    ctrlreq->bRequest, ctrlreq->wValue);
			ret = -EINVAL;
			goto no_need_xmit;
		}
	}

out:
	if (urb->status != -EINPROGRESS) {
		LOG("URB already unlinked!, status %d\n", urb->status);
		return urb->status;
	}

	if (usb_pipeisoc(urb->pipe)) {
		LOG("ISO URBs not supported");
		ret = -EINVAL;
		goto no_need_xmit;
	}

	urb->hcpriv = (void *) hcd_to_vhcd(hcd);
	LOG("hcpriv %p", urb->hcpriv);

	transfer_flags = urb->transfer_flags;
	usb_get_urb(urb);

#if 0
	d_urb->type              = usb_pipetype(urb->pipe);
	d_urb->dev_id            = data->gadget[urb->dev->portnum-1].id;
	d_urb->endpoint          = usb_pipeendpoint(urb->pipe);
	d_urb->direction         = 0 || usb_pipein(urb->pipe);
	d_urb->interval          = urb->interval;
	d_urb->transfer_flags    = urb->transfer_flags;
	d_urb->number_of_packets = urb->number_of_packets;
	d_urb->priv              = priv;
	d_urb->size              = urb->transfer_buffer_length;
	d_urb->data		 = urb->transfer_buffer;
	d_urb->phys_addr	 = d_urb->data?virt_to_phys(d_urb->data):0;

	if (urb->setup_packet) {
		memcpy(d_urb->setup_packet, urb->setup_packet, 8);
	}
	/* XXX ISO ? */
//	if (urb->number_of_packets)
//		memcpy(d_urb->iso_desc, urb->iso_frame_desc,
//		       urb->number_of_packets*sizeof(struct usb_iso_packet_descriptor));

	ret = libddeusb_submit_d_urb(d_urb);
#else
	unsigned port_num = urb->dev->portnum;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		{
			struct usb_ctrlrequest *req =
				(struct usb_ctrlrequest *) urb->setup_packet;
			dde_linux26_usb_vhcd_submit_control_urb_cb(port_num,
			                                           usb_pipeendpoint(urb->pipe),
			                                           usb_pipein(urb->pipe),
			                                           urb, /* handle */
			                                           sizeof(*req),
			                                           req);
		}
		break;
	case PIPE_INTERRUPT:
		printk(" int\n");
//		dde_linux26_usb_vhcd_submit_urb(urb->transfer_buffer,
//		                                urb->transfer_buffer_length);
		return -EINVAL;
		break;

	/* unsupported transfer types */
	case PIPE_BULK:
		printk(" bulk\n");
		return -EINVAL;
	case PIPE_ISOCHRONOUS:
		printk(" isoc\n");
		return -EINVAL;
	}
#endif

//	if (ret) {
//		LOG("URB SUBMIT FAILED (%d).",ret);
//		/* s.t. went wrong. */
//		spin_lock_irqsave(&data->lock, flags);
//		data->rcv_buf[i]=NULL;
//		spin_unlock_irqrestore(&data->lock, flags);
//		down(&data->rcv_buf_free);
//		kmem_cache_free(priv_cache, urb->hcpriv);
//		usb_put_urb(urb);
//		urb->status = ret;
//		urb->hcpriv = NULL;
//		libddeusb_free_d_urb(d_urb);
//		return ret;
//	}

	LOG("URB %p submitted", urb);
	return 0;

no_need_xmit:
	usb_hcd_giveback_urb(hcd, urb);
	return 0;
}
/**
 * Sets the final status of an URB and returns it to the device driver. Any
 * required cleanup of the URB is performed.
 *
 * Converts the core's DWC_E_* status into a negative errno, copies the
 * actual length (and, for isochronous URBs, the per-packet results) out
 * of the core's URB object, releases periodic bandwidth if needed,
 * frees the core URB and gives the Linux URB back to usbcore.
 *
 * Fix: the isochronous loop assigned iso_frame_desc[i].status from
 * dwc_otg_hcd_urb_get_iso_desc_actual_length() — a copy-paste of the
 * line above — so per-packet statuses reported packet lengths.  It now
 * uses dwc_otg_hcd_urb_get_iso_desc_status().
 */
static int _complete(dwc_otg_hcd_t * hcd, void *urb_handle,
		     dwc_otg_hcd_urb_t * dwc_otg_urb, uint32_t status)
{
	struct urb *urb = (struct urb *)urb_handle;

#ifdef DEBUG
	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
		DWC_PRINTF("%s: urb %p, device %d, ep %d %s, status=%d\n",
			   __func__, urb, usb_pipedevice(urb->pipe),
			   usb_pipeendpoint(urb->pipe),
			   usb_pipein(urb->pipe) ? "IN" : "OUT", status);
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			int i;
			for (i = 0; i < urb->number_of_packets; i++) {
				DWC_PRINTF(" ISO Desc %d status: %d\n",
					   i, urb->iso_frame_desc[i].status);
			}
		}
	}
#endif

	urb->actual_length = dwc_otg_hcd_urb_get_actual_length(dwc_otg_urb);

	/* Convert status value. */
	switch (status) {
	case -DWC_E_PROTOCOL:
		status = -EPROTO;
		break;
	case -DWC_E_IN_PROGRESS:
		status = -EINPROGRESS;
		break;
	case -DWC_E_PIPE:
		status = -EPIPE;
		break;
	case -DWC_E_IO:
		status = -EIO;
		break;
	case -DWC_E_TIMEOUT:
		status = -ETIMEDOUT;
		break;
	default:
		if (status) {
			/* Status codes the DWC_OTG IP doesn't know are
			 * mapped to a protocol error. */
			DWC_WARN("Unknown urb status %d, but assumed to be an EPROTO\n",
				 status);
			status = -EPROTO;
		}
	}

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		int i;

		urb->error_count = dwc_otg_hcd_urb_get_error_count(dwc_otg_urb);
		for (i = 0; i < urb->number_of_packets; ++i) {
			urb->iso_frame_desc[i].actual_length =
			    dwc_otg_hcd_urb_get_iso_desc_actual_length
			    (dwc_otg_urb, i);
			/* was: ..._get_iso_desc_actual_length (copy-paste) */
			urb->iso_frame_desc[i].status =
			    dwc_otg_hcd_urb_get_iso_desc_status
			    (dwc_otg_urb, i);
		}
	}

	urb->status = status;
	urb->hcpriv = NULL;
	if (!status) {
		/* Honour URB_SHORT_NOT_OK on short transfers. */
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
		    (urb->actual_length < urb->transfer_buffer_length)) {
			urb->status = -EREMOTEIO;
		}
	}

	/* Periodic endpoints give their reserved bandwidth back. */
	if ((usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) ||
	    (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
		struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);
		if (ep) {
			free_bus_bandwidth(dwc_otg_hcd_to_hcd(hcd),
					   dwc_otg_hcd_get_ep_bandwidth(hcd,
									ep->hcpriv),
					   urb);
		}
	}

	dwc_free(dwc_otg_urb);
	usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status);

	return 0;
}
/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem. Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (also called "request cancellation").
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number. If the submission is successful, the complete()
 * callback from the urb will be called exactly once, when the USB core and
 * host controller driver are finished with the urb. When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request. The completion handler may then
 * immediately free or reuse that URB.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification. For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput. This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queueing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier requests.
 *
 * Bulk and Isochronous URBs may always be queued. At this writing, all
 * mainstream host controller drivers support queueing for control and
 * interrupt transfer requests.
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb. Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks). When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled. This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc. There are four
 * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
 * NOTE(review): this chunk is truncated — the function continues past
 * the high-bandwidth isochronous check outside the visible source.
 */
int usb_submit_urb(struct urb *urb, int mem_flags)
{
	int pipe, temp, max;
	struct usb_device *dev;
	struct usb_operations *op;
	int is_out;

//	printk("sub dev %p bus %p num %i op %p sub %p\n",
//	       urb->dev, urb->dev->bus,urb->dev->devnum,urb->dev->bus->op, urb->dev->bus->op->submit_urb);

	/* Basic validity: an URB, not already owned by an HCD, and with a
	 * completion handler to give it back through. */
	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	if (!(dev = urb->dev) ||
	    (dev->state < USB_STATE_DEFAULT) ||
	    (!dev->bus) || (dev->devnum <= 0))
		return -ENODEV;
	if (!(op = dev->bus->op) || !op->submit_urb)
		return -ENODEV;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->bandwidth = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	pipe = urb->pipe;
	temp = usb_pipetype (pipe);
	is_out = usb_pipeout (pipe);

	/* Non-control transfers require a configured device. */
	if (!usb_pipecontrol (pipe) && dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	/* (actually HCDs may need to duplicate this, endpoint might yet
	 * stall due to queued bulk/intr transactions that complete after
	 * we check)
	 */
	if (usb_endpoint_halted (dev, usb_pipeendpoint (pipe), is_out))
		return -EPIPE;

	/* FIXME there should be a sharable lock protecting us against
	 * config/altsetting changes and disconnects, kicking in here.
	 * (here == before maxpacket, and eventually endpoint type,
	 * checks get made.)
	 */
	max = usb_maxpacket (dev, pipe, is_out);
	if (max <= 0) {
		dbg ("%s: bogus endpoint %d-%s on usb-%s-%s (bad maxpacket %d)",
			__FUNCTION__,
			usb_pipeendpoint (pipe), is_out ? "OUT" : "IN",
			dev->bus->bus_name, dev->devpath,
			max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (temp == PIPE_ISOCHRONOUS) {
		int	n, len;

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			/* bits 12..11 of maxpacket encode (mult - 1) */
			int	mult = 1 + ((max >> 11) & 0x03);
			max &= 0x03ff;
			max *= mult;
		}
//Create and return an interrupt queue object.
//Allocates one QH and one qTD per queue element, fills them in for a
//single-transaction interrupt transfer each, links the QH chain into the
//controller's periodic schedule, and registers the queue on the
//controller's pending interrupt-queue list.  Returns NULL on any failure.
struct int_queue* EHCICreateIntQueue(struct usb_device *dev,
	unsigned long pipe, int queuesize, int elementsize,
	void *buffer, int interval)
{
	struct ehci_ctrl *ctrl = ehci_get_ctrl(dev);
	struct int_queue *result = NULL;
	uint32_t i, toggle;
	struct QH *list = NULL;
	int cmd = 0;	/* NOTE(review): unused in this routine */
	DWORD dwFlags;

	/*
	 * Interrupt transfers requiring several transactions are not supported
	 * because bInterval is ignored.
	 *
	 * Also, ehci_submit_async() relies on wMaxPacketSize being a power of 2
	 * <= PKT_ALIGN if several qTDs are required, while the USB
	 * specification does not constrain this for interrupt transfers. That
	 * means that ehci_submit_async() would support interrupt transfers
	 * requiring several transactions only as long as the transfer size does
	 * not require more than a single qTD.
	 */
	if (elementsize > usb_maxpacket(dev, pipe)) {
		printf("%s: xfers requiring several transactions are not supported.\r\n",
			"_ehci_create_int_queue");
		return NULL;
	}
	if (usb_pipetype(pipe) != PIPE_INTERRUPT) {
		debug("non-interrupt pipe (type=%lu)", usb_pipetype(pipe));
		return NULL;
	}

	/* limit to 4 full pages worth of data -
	 * we can safely fit them in a single TD,
	 * no matter the alignment */
	if (elementsize >= 16384) {
		debug("too large elements for interrupt transfers\r\n");
		return NULL;
	}

	result = malloc(sizeof(*result));
	if (!result) {
		debug("ehci intr queue: out of memory\r\n");
		goto fail1;
	}
	//Create EVENT object to synchronizing the access.
	result->hEvent = CreateEvent(FALSE);
	if (NULL == result->hEvent) {
		goto fail1;
	}
	result->dwTimeOut = 0;
	result->pNext = NULL;
	result->pOwnerThread = KernelThreadManager.lpCurrentKernelThread;
	result->QueueIntHandler = _ehciQueueIntHandler;
	result->pUsbDev = dev;
	result->dwStatus = INT_QUEUE_STATUS_INITIALIZED;
	result->elementsize = elementsize;
	result->pipe = pipe;

	/* One DMA-aligned QH per element. */
	result->first = memalign(USB_DMA_MINALIGN,
		sizeof(struct QH) * queuesize);
	if (!result->first) {
		debug("ehci intr queue: out of memory\r\n");
		goto fail2;
	}
	debug("%s: Allocate %d QH(s) at %X.\r\n", __func__, queuesize,
		result->first);
	result->current = result->first;
	result->last = result->first + queuesize - 1;
	/* One DMA-aligned qTD per element. */
	result->tds = memalign(USB_DMA_MINALIGN,
		sizeof(struct qTD) * queuesize);
	if (!result->tds) {
		debug("ehci intr queue: out of memory\r\n");
		goto fail3;
	}
	debug("%s: Allocate %d qTD(s) at %X.\r\n", __func__, queuesize,
		result->tds);
	memset(result->first, 0, sizeof(struct QH) * queuesize);
	memset(result->tds, 0, sizeof(struct qTD) * queuesize);

	/* Start with the endpoint's current data toggle. */
	toggle = usb_gettoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe));

	for (i = 0; i < (uint32_t)queuesize; i++) {
		struct QH *qh = result->first + i;
		struct qTD *td = result->tds + i;
		void **buf = &qh->buffer;

		/* Chain QHs horizontally; last entry terminates the chain. */
		qh->qh_link = cpu_to_hc32((unsigned long)(qh + 1) |
			QH_LINK_TYPE_QH);
		if (i == queuesize - 1)
			qh->qh_link = cpu_to_hc32(QH_LINK_TERMINATE);

		qh->qh_overlay.qt_next = cpu_to_hc32((unsigned long)td);
		qh->qh_overlay.qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE);
		qh->qh_endpt1 = cpu_to_hc32(
			(0 << 28) |	/* No NAK reload (ehci 4.9) */
			(usb_maxpacket(dev, pipe) << 16) |	/* MPS */
			(1 << 14) |
			QH_ENDPT1_EPS(ehci_encode_speed(dev->speed)) |
			(usb_pipeendpoint(pipe) << 8) |	/* Endpoint Number */
			(usb_pipedevice(pipe) << 0));
		qh->qh_endpt2 = cpu_to_hc32(
			(1 << 30) |	/* 1 Tx per mframe */
			(1 << 0));	/* S-mask: microframe 0 */
		if (dev->speed == USB_SPEED_LOW ||
			dev->speed == USB_SPEED_FULL) {
			/* C-mask: microframes 2-4 */
			qh->qh_endpt2 |= cpu_to_hc32((0x1c << 8));
		}
		ehci_update_endpt2_dev_n_port(dev, qh);

		td->qt_next = cpu_to_hc32(QT_NEXT_TERMINATE);
		td->qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE);
		debug("%s: communication direction is '%s'\r\n", __func__,
			usb_pipein(pipe) ? "in" : "out");
		if (i == queuesize - 1)	//Last one,set IoC bit.
		{
			td->qt_token = cpu_to_hc32(
				QT_TOKEN_DT(toggle) |
				(elementsize << 16) |
				(1 << 15) |	//Interrupt On Completion.
				(3 << 10) |	//CERR bits.
				((usb_pipein(pipe) ? 1 : 0) << 8) |	/* IN/OUT token */
				0x80);	/* active */
		}
		else
		{
			td->qt_token = cpu_to_hc32(
				QT_TOKEN_DT(toggle) |
				(elementsize << 16) |
				(3 << 10) |	//CERR bits.
				((usb_pipein(pipe) ? 1 : 0) << 8) |	/* IN/OUT token */
				0x80);	/* active */
		}
		debug("%s: construct TD token = %X.\r\n", __func__,
			td->qt_token);

		/* 5 buffer pointers cover this element's slice of the caller's
		 * buffer, page by page. */
		td->qt_buffer[0] =
			cpu_to_hc32((unsigned long)buffer + i * elementsize);
		td->qt_buffer[1] =
			cpu_to_hc32((td->qt_buffer[0] + 0x1000) & ~0xfff);
		td->qt_buffer[2] =
			cpu_to_hc32((td->qt_buffer[0] + 0x2000) & ~0xfff);
		td->qt_buffer[3] =
			cpu_to_hc32((td->qt_buffer[0] + 0x3000) & ~0xfff);
		td->qt_buffer[4] =
			cpu_to_hc32((td->qt_buffer[0] + 0x4000) & ~0xfff);

#ifdef __MS_VC__
		//MS VC can not support sizeof(void) operation,we should
		//convert the buffer type to char*.
		*buf = (void*)((char*)buffer + i * elementsize);
#else
		//sizeof(void) is 1 under GCC or other environment,so the
		//following sentence is same as above one.
		*buf = buffer + i * elementsize;
#endif
		toggle ^= 1;
	}

	/* Make the descriptors and data buffer visible to the controller. */
	flush_dcache_range((unsigned long)buffer,
		ALIGN_END_ADDR(char, buffer, queuesize * elementsize));
	flush_dcache_range((unsigned long)result->first,
		ALIGN_END_ADDR(struct QH, result->first, queuesize));
	flush_dcache_range((unsigned long)result->tds,
		ALIGN_END_ADDR(struct qTD, result->tds, queuesize));

	//Acquire exclusively accessing of the controller.
	WaitForThisObject(ctrl->hMutex);
	if (ctrl->periodic_schedules > 0) {
		if (ehci_disable_periodic(ctrl) < 0) {
			ReleaseMutex(ctrl->hMutex);
			_hx_printf("FATAL %s: periodic should never fail, but did.\r\n",
				__func__);
			goto fail3;
		}
	}

	__ENTER_CRITICAL_SECTION(NULL, dwFlags);
	/* hook up to periodic list */
	list = &ctrl->periodic_queue;
	result->last->qh_link = list->qh_link;
	list->qh_link = cpu_to_hc32((unsigned long)result->first |
		QH_LINK_TYPE_QH);
	//Link interrupt queue to Controller's pending queue.
	if (NULL == ctrl->pIntQueueFirst) {
		ctrl->pIntQueueFirst = result;
		ctrl->pIntQueueLast = result;
	}
	else {
		result->pNext = ctrl->pIntQueueFirst;
		ctrl->pIntQueueFirst = result;
	}
	__LEAVE_CRITICAL_SECTION(NULL, dwFlags);

	flush_dcache_range((unsigned long)result->last,
		ALIGN_END_ADDR(struct QH, result->last, 1));
	flush_dcache_range((unsigned long)list,
		ALIGN_END_ADDR(struct QH, list, 1));

	/* NOTE(review): if ehci_enable_periodic() fails below, the QHs have
	 * already been linked into the periodic schedule, yet the fail path
	 * frees them — leaving a dangling hardware link.  Confirm whether an
	 * unlink step is needed before freeing. */
	if (ehci_enable_periodic(ctrl) < 0) {
		ReleaseMutex(ctrl->hMutex);
		_hx_printf("FATAL %s: periodic should never fail, but did.\r\n",
			__func__);;
		goto fail3;
	}
	ctrl->periodic_schedules++;
	ReleaseMutex(ctrl->hMutex);

	debug("Exit create_int_queue\r\n");
	return result;

	/* Fail labels fall through: each releases what was acquired after
	 * the previous label's resource. */
fail3:
	if (result->tds)
		free(result->tds);
fail2:
	if (result->first)
		free(result->first);
fail1:
	if (result) {
		if (NULL != result->hEvent) {
			DestroyEvent(result->hEvent);
		}
		free(result);
	}
	return NULL;
}
/*
 * For simplicity, we read one record in one system call and throw out
 * what does not fit. This means that the following does not work:
 *   dd if=/dbg/usbmon/0t bs=10
 * Also, we do not allow seeks and do not bother advancing the offset.
 */
static ssize_t mon_text_read(struct file *file, char __user *buf,
	size_t nbytes, loff_t *ppos)
{
	struct mon_reader_text *rp = file->private_data;
	struct mon_bus *mbus = rp->r.m_bus;
	DECLARE_WAITQUEUE(waita, current);
	struct mon_event_text *ep;
	int cnt, limit;
	char *pbuf;
	char udir, utype;
	int data_len, i;

	/* Sleep (interruptibly) until an event can be fetched off the bus;
	 * honor O_NONBLOCK with -EWOULDBLOCK. */
	add_wait_queue(&rp->wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);
	while ((ep = mon_text_fetch(rp, mbus)) == NULL) {
		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		/*
		 * We do not count nwaiters, because ->release is supposed
		 * to be called when all openers are gone only.
		 */
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->wait, &waita);

	/* Serialize use of the shared format buffer. */
	down(&rp->printf_lock);
	cnt = 0;
	pbuf = rp->printf_buf;
	limit = rp->printf_size;

	/* Direction and pipe-type tags of the text record. */
	udir = usb_pipein(ep->pipe) ? 'i' : 'o';
	switch (usb_pipetype(ep->pipe)) {
	case PIPE_ISOCHRONOUS:	utype = 'Z'; break;
	case PIPE_INTERRUPT:	utype = 'I'; break;
	case PIPE_CONTROL:	utype = 'C'; break;
	default: /* PIPE_BULK */
		utype = 'B';
	}

	/*
	 * NOTE(review): snprintf() returns the length that *would* have
	 * been written, so once cnt exceeds limit the size argument
	 * "limit - cnt" goes negative and is converted to a huge size_t;
	 * scnprintf() would be the safe choice.  Also, copy_to_user()
	 * below copies cnt bytes without clamping to nbytes — confirm
	 * printf_size never exceeds the smallest read size callers use.
	 */
	cnt += snprintf(pbuf + cnt, limit - cnt,
	    "%lx %u %c %c%c:%03u:%02u",
	    ep->id, ep->tstamp, ep->type,
	    utype, udir, usb_pipedevice(ep->pipe), usb_pipeendpoint(ep->pipe));

	if (ep->setup_flag == 0) {   /* Setup packet is present and captured */
		cnt += snprintf(pbuf + cnt, limit - cnt,
		    " s %02x %02x %04x %04x %04x",
		    ep->setup[0],
		    ep->setup[1],
		    (ep->setup[3] << 8) | ep->setup[2],
		    (ep->setup[5] << 8) | ep->setup[4],
		    (ep->setup[7] << 8) | ep->setup[6]);
	} else if (ep->setup_flag != '-') { /* Unable to capture setup packet */
		cnt += snprintf(pbuf + cnt, limit - cnt,
		    " %c __ __ ____ ____ ____", ep->setup_flag);
	} else {                     /* No setup for this kind of URB */
		cnt += snprintf(pbuf + cnt, limit - cnt, " %d", ep->status);
	}
	cnt += snprintf(pbuf + cnt, limit - cnt, " %d", ep->length);

	/* Hex-dump up to DATA_MAX captured bytes, 4 per group, or print
	 * the single-character data flag if the data was not captured. */
	if ((data_len = ep->length) > 0) {
		if (ep->data_flag == 0) {
			cnt += snprintf(pbuf + cnt, limit - cnt, " =");
			if (data_len >= DATA_MAX)
				data_len = DATA_MAX;
			for (i = 0; i < data_len; i++) {
				if (i % 4 == 0) {
					cnt += snprintf(pbuf + cnt, limit - cnt,
					    " ");
				}
				cnt += snprintf(pbuf + cnt, limit - cnt,
				    "%02x", ep->data[i]);
			}
			cnt += snprintf(pbuf + cnt, limit - cnt, "\n");
		} else {
			cnt += snprintf(pbuf + cnt, limit - cnt,
			    " %c\n", ep->data_flag);
		}
	} else {
		cnt += snprintf(pbuf + cnt, limit - cnt, "\n");
	}

	if (copy_to_user(buf, rp->printf_buf, cnt))
		cnt = -EFAULT;
	up(&rp->printf_lock);
	kmem_cache_free(rp->e_slab, ep);
	return cnt;
}
/********************************************************** * Name : hcdi_s3602_submit_upt * Description : submit a usb transfer request to hc of s3602 * Parameter : pupt: usb transfer request block * Return : RET_SUCCESS: success * !RET_SUCCESS: error ***********************************************************/ RET_CODE hcdi_s3602_submit_upt_ex(struct upt* pupt) { struct usb_hc_device* hc_dev = (struct usb_hc_device*) dev_get_by_type(NULL, HLD_DEV_TYPE_USB_HOST); struct hc_s3602_private* p_priv = NULL ; UINT32 pipe; UINT32 ret; // soc_printf("Submit\n"); if(NULL == pupt) return !RET_SUCCESS; if(pupt->type == USB_CONTROL_TRANSFER_MODE) { p_priv = (struct hc_s3602_private*)hc_dev->priv_ctrl; hc_dev->curr_upt_ctl= pupt; } else if(pupt->type == USB_BULK_IN_TRANSFER_MODE) { p_priv = (struct hc_s3602_private*)hc_dev->priv_bulk; hc_dev->curr_upt_bulk= pupt; } else if(pupt->type == USB_BULK_OUT_TRANSFER_MODE) { p_priv = (struct hc_s3602_private*)hc_dev->priv_bulk_out; hc_dev->curr_upt_bulk_out= pupt; } else if(pupt->type == USB_OPT_BULK_TRANSFER_MODE) { p_priv = (struct hc_s3602_private*)hc_dev->priv_opt_bulk; hc_dev->curr_upt_opt_bulk= pupt; } else if(pupt->type == USB_HUB_TRANSFER_MODE) { p_priv = (struct hc_s3602_private*)hc_dev->priv_int_d; hc_dev->curr_upt_int_d= pupt; } else if(pupt->type == USB_INTERRUPT_TRANSFER_MODE) { p_priv = (struct hc_s3602_private*)hc_dev->priv_int_c; hc_dev->curr_upt_int_c= pupt; } else return !RET_SUCCESS; if (NULL == p_priv) { HCDI_CMD_PRINTF("s3602_submit_upt: hc's private doesn't exist!\n"); return !RET_SUCCESS; } if (p_priv->main_sts == HCD_S3602_MAIN_BUSY) { HCDI_CMD_PRINTF("s3602_submit_upt: hc is busy!\n"); return !RET_SUCCESS; } if (pupt->opt_bulk.opt_bulk_en == FALSE) { pipe = pupt->pipe; switch (usb_pipetype(pipe)) { case USB_CTRL: if (usb_pipein(pipe)) //direction is IN { if (pupt->length == 0) { HCDI_CMD_PRINTF("usb ctrl in length is 0!\n"); return !RET_SUCCESS; } } break; case USB_BULK: if (pupt->length == 0) { 
HCDI_CMD_PRINTF("usb bulk length is 0!\n"); return !RET_SUCCESS; } break; case USB_INT: //HCDI_CMD_PRINTF("USB INT Ep!\n"); break; default: break; } } pupt->hc_dev = (void *) hc_dev; p_priv->event_type = HCD_S3602_USB_EVT_REQ; p_priv->hc_dev = hc_dev; hcd_event_dispatch_ex(p_priv); return RET_SUCCESS; }
static int dwc2_transfer(struct usb_device *dev, unsigned long pipe, int size, int pid, ep_dir_t dir, uint32_t ch_num, u8 *data_buf) { struct dwc_ctrl *ctrl = dev->controller; pUSB_OTG_REG reg = ctrl->otgReg; uint32_t do_copy; int ret; uint32_t packet_cnt; uint32_t packet_size; uint32_t transferred = 0; uint32_t inpkt_length; uint32_t eptype; HCTSIZ_DATA hctsiz = { .d32 = 0 }; HCCHAR_DATA hcchar = { .d32 = 0 }; void *aligned_buf; debug("# %s #dev %p, size %d, pid %d, dir %d, buf %p\n", __func__, dev, size, pid, dir, data_buf); if (dev->speed != USB_SPEED_HIGH) { printf("Support high-speed only\n"); return -1; } if (size > DMA_SIZE) { printf("Transfer too large: %d\n", size); return -1; } packet_size = usb_maxpacket(dev, pipe); packet_cnt = DIV_ROUND_UP(size, packet_size); inpkt_length = roundup(size, packet_size); /* At least 1 packet should be programed */ packet_cnt = (packet_cnt == 0) ? 1 : packet_cnt; /* * For an IN, this field is the buffer size that the application has * reserved for the transfer. The application should program this field * as integer multiple of the maximum packet size for IN transactions. */ hctsiz.xfersize = (dir == EPDIR_OUT) ? size : inpkt_length; hctsiz.pktcnt = packet_cnt; hctsiz.pid = pid; hcchar.mps = packet_size; hcchar.epnum = usb_pipeendpoint(pipe); hcchar.epdir = dir; switch (usb_pipetype(pipe)) { case PIPE_CONTROL: eptype = 0; break; case PIPE_BULK: eptype = 2; break; default: printf("Un-supported type\n"); return -EOPNOTSUPP; } hcchar.eptype = eptype; hcchar.multicnt = 1; hcchar.devaddr = usb_pipedevice(pipe); hcchar.chdis = 0; hcchar.chen = 1; /* * Check the buffer address which should be 4-byte aligned and DMA * coherent */ //do_copy = !dma_coherent(data_buf) || ((uintptr_t)data_buf & 0x3); do_copy = 1;//(uintptr_t)data_buf & 0x3; aligned_buf = do_copy ? 
ctrl->align_buf : data_buf; if (do_copy && (dir == EPDIR_OUT)) memcpy(aligned_buf, data_buf, size); if (dir == EPDIR_OUT) flush_dcache_range(aligned_buf, aligned_buf + roundup(size, ARCH_DMA_MINALIGN)); writel(hctsiz.d32, ®->Host.hchn[ch_num].hctsizn); writel((uint32_t)aligned_buf, ®->Host.hchn[ch_num].hcdman); writel(hcchar.d32, ®->Host.hchn[ch_num].hccharn); ret = dwc_wait_for_complete(dev, ch_num); if (ret >= 0) { /* Calculate actual transferred length */ transferred = (dir == EPDIR_IN) ? inpkt_length - ret : ret; if (dir == EPDIR_IN) invalidate_dcache_range(aligned_buf, aligned_buf + roundup(transferred, ARCH_DMA_MINALIGN)); if (do_copy && (dir == EPDIR_IN)) memcpy(data_buf, aligned_buf, transferred); } /* Save data toggle */ hctsiz.d32 = readl(®->Host.hchn[ch_num].hctsizn); usb_settoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe), (hctsiz.pid >> 1)); if (ret < 0) { printf("%s Transfer stop code: %d\n", __func__, ret); return ret; } return transferred; } int usb_lowlevel_stop(int index) { pUSB_OTG_REG reg = (pUSB_OTG_REG)rkusb_active_hcd->regbase; HPRT0_DATA hprt0 = { .d32 = 0 }; GUSBCFG_DATA gusbcfg = { .d32 = 0 }; /* Stop connect and put everything of port state in reset. 
*/ hprt0.prtena = 1; hprt0.prtenchng = 1; hprt0.prtconndet = 1; writel(hprt0.d32, ®->Host.hprt); gusbcfg.d32 = 0x1400; writel(gusbcfg.d32, ®->Core.gusbcfg); return 0; } static void dwc2_reinit(pUSB_OTG_REG regbase) { pUSB_OTG_REG reg = regbase; GUSBCFG_DATA gusbcfg = { .d32 = 0 }; GRSTCTL_DATA grstctl = { .d32 = 0 }; GINTSTS_DATA gintsts = { .d32 = 0 }; GAHBCFG_DATA gahbcfg = { .d32 = 0 }; RXFIFOSIZE_DATA grxfsiz = { .d32 = 0 }; HCINTMSK_DATA hcintmsk = { .d32 = 0 }; TXFIFOSIZE_DATA gnptxfsiz = { .d32 = 0 }; const int timeout = 10000; int i; /* Wait for AHB idle */ for (i = 0; i < timeout; i++) { udelay(1); grstctl.d32 = readl(®->Core.grstctl); if (grstctl.ahbidle) break; } if (i == timeout) { printf("DWC2 Init error AHB Idle\n"); return; } /* Restart the Phy Clock */ writel(0x0, ®->ClkGate.PCGCR); /* Core soft reset */ grstctl.csftrst = 1; writel(grstctl.d32, ®->Core.grstctl); for (i = 0; i < timeout; i++) { udelay(1); grstctl.d32 = readl(®->Core.grstctl); if (!grstctl.csftrst) break; } if (i == timeout) { printf("DWC2 Init error reset fail\n"); return; } /* Set 16bit PHY if & Force host mode */ gusbcfg.d32 = readl(®->Core.gusbcfg); gusbcfg.phyif = 1; gusbcfg.forcehstmode = 1; gusbcfg.forcedevmode = 0; writel(gusbcfg.d32, ®->Core.gusbcfg); /* Wait for force host mode effect, it may takes 100ms */ for (i = 0; i < timeout; i++) { udelay(10); gintsts.d32 = readl(®->Core.gintsts); if (gintsts.curmod) break; } if (i == timeout) { printf("DWC2 Init error force host mode fail\n"); return; } /* * Config FIFO * The non-periodic tx fifo and rx fifo share one continuous * piece of IP-internal SRAM. 
*/ grxfsiz.rxfdep = DWC2_RXFIFO_DEPTH; writel(grxfsiz.d32, ®->Core.grxfsiz); gnptxfsiz.nptxfstaddr = DWC2_RXFIFO_DEPTH; gnptxfsiz.nptxfdep = DWC2_NPTXFIFO_DEPTH; writel(gnptxfsiz.d32, ®->Core.gnptxfsiz); /* Init host channels */ hcintmsk.xfercomp = 1; hcintmsk.xacterr = 1; hcintmsk.stall = 1; hcintmsk.chhltd = 1; hcintmsk.bblerr = 1; for (i = 0; i < MAX_EPS_CHANNELS; i++) writel(hcintmsk.d32, ®->Host.hchn[i].hcintmaskn); /* Unmask interrupt & Use configure dma mode */ gahbcfg.glblintrmsk = 1; gahbcfg.hbstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR8; gahbcfg.dmaen = 1; writel(gahbcfg.d32, ®->Core.gahbcfg); printf("DWC2@0x%p init finished!\n", regbase); } int usb_lowlevel_init(int index, enum usb_init_type init, void **controller) { pUSB_OTG_REG reg = (pUSB_OTG_REG)rkusb_active_hcd->regbase; struct dwc_ctrl *dwcctl; dwcctl = malloc(sizeof(struct dwc_ctrl)); if (!dwcctl) return -ENOMEM; dwcctl->otgReg = reg; dwcctl->rootdev = 0; dwcctl->align_buf = memalign(USB_DMA_MINALIGN, DMA_SIZE); if (!dwcctl->align_buf) return -ENOMEM; dwc2_reinit(reg); *controller = dwcctl; return 0; } int submit_bulk_msg(struct usb_device *dev, unsigned long pipe, void *buffer, int length) { ep_dir_t data_dir; int pid; int ret = 0; if (usb_pipetype(pipe) != PIPE_BULK) { debug("non-bulk pipe (type=%lu)", usb_pipetype(pipe)); return -1; } if (usb_pipein(pipe)) data_dir = EPDIR_IN; else if (usb_pipeout(pipe)) data_dir = EPDIR_OUT; else return -1; pid = usb_gettoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe)); if (pid) pid = DWC_HCTSIZ_DATA1; else pid = DWC_HCTSIZ_DATA0; ret = dwc2_transfer(dev, pipe, length, pid, data_dir, 0, buffer); if (ret < 0) return -1; dev->act_len = ret; dev->status = 0; return 0; } int submit_control_msg(struct usb_device *dev, unsigned long pipe, void *buffer, int length, struct devrequest *setup) { int ret = 0; struct dwc_ctrl *ctrl = dev->controller; ep_dir_t data_dir; if (usb_pipetype(pipe) != PIPE_CONTROL) { debug("non-control pipe (type=%lu)", usb_pipetype(pipe)); 
return -1; } if (usb_pipedevice(pipe) == ctrl->rootdev) { if (!ctrl->rootdev) dev->speed = USB_SPEED_HIGH; return dwc2_submit_root(dev, pipe, buffer, length, setup); } if (usb_pipein(pipe)) data_dir = EPDIR_IN; else if (usb_pipeout(pipe)) data_dir = EPDIR_OUT; else return -1; /* Setup Phase */ if (dwc2_transfer(dev, pipe, 8, PID_SETUP, EPDIR_OUT, 0, (u8 *)setup) < 0) return -1; /* Data Phase */ if (length > 0) { ret = dwc2_transfer(dev, pipe, length, PID_DATA1, data_dir, 0, buffer); if (ret < 0) return -1; } /* Status Phase */ if (dwc2_transfer(dev, pipe, 0, PID_DATA1, !data_dir, 0, NULL) < 0) return -1; dev->act_len = ret; dev->status = 0; return 0; } int submit_int_msg(struct usb_device *dev, unsigned long pipe, void *buffer, int length, int interval) { return 0; }
/*
 * Remove an URB from the controller's schedule.  Async (control/bulk)
 * QHs are unlinked; interrupt QHs are descheduled and, if more qtds
 * remain, rescheduled; isochronous URBs are simply left to complete.
 * Called with the URB's completion status; returns 0 or the negative
 * code from usb_hcd_check_unlink_urb().
 */
static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct ehci_hcd *ehci = hcd_to_ehci (hcd);
	struct ehci_qh *qh;
	unsigned long flags;
	int rc;

	spin_lock_irqsave (&ehci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	switch (usb_pipetype (urb->pipe)) {
	// case PIPE_CONTROL:
	// case PIPE_BULK:
	default:
		/* Control and bulk pipes land here; the commented-out case
		 * labels above document that this default arm handles the
		 * async schedule. */
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
		case QH_STATE_COMPLETING:
			unlink_async(ehci, qh);
			break;
		case QH_STATE_UNLINK:
		case QH_STATE_UNLINK_WAIT:
			/* already started */
			break;
		case QH_STATE_IDLE:
			/* QH should not be idle while an URB is queued on it. */
			WARN_ON(1);
			break;
		}
		break;

	case PIPE_INTERRUPT:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
			intr_deschedule (ehci, qh);
			/* FALL THROUGH */
		case QH_STATE_IDLE:
			qh_completions (ehci, qh);
			break;
		default:
			ehci_dbg (ehci, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}

		/* reschedule QH iff another request is queued */
		if (!list_empty (&qh->qtd_list)
				&& HC_IS_RUNNING (hcd->state)) {
			rc = qh_schedule(ehci, qh);

			/* An error here likely indicates handshake failure
			 * or no space left in the schedule.  Neither fault
			 * should happen often ...
			 *
			 * FIXME kill the now-dysfunctional queued urbs
			 */
			if (rc != 0)
				ehci_err(ehci,
					"can't reschedule qh %p, err %d",
					qh, rc);
		}
		break;

	case PIPE_ISOCHRONOUS:
		// itd or sitd ...

		// wait till next completion, do it then.
		// completion irqs can wait up to 1024 msec,
		break;
	}
done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	return rc;
}