static inline void ehci_qtd_init(struct ehci_qtd *qtd)
{
	dma_addr_t dma = ehci_virt_to_dma(qtd);

	memset(qtd, 0, sizeof(*qtd));
	qtd->qtd_dma = dma;
	/* note: unlike the kernel variant below, hw_token is stored here
	 * without an explicit endianness conversion */
	qtd->hw_token = QTD_STS_HALT;
	qtd->hw_next = EHCI_LIST_END();
	qtd->hw_alt_next = EHCI_LIST_END();
}
static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
				 dma_addr_t dma)
{
	memset(qtd, 0, sizeof(*qtd));
	qtd->qtd_dma = dma;
	qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END(ehci);
	qtd->hw_alt_next = EHCI_LIST_END(ehci);
	INIT_LIST_HEAD(&qtd->qtd_list);
}
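/* For context: in the kernel driver this initializer is paired with a
 * pool-backed allocator along these lines (a sketch modeled on
 * ehci-mem.c's ehci_qtd_alloc(), not necessarily verbatim for this
 * kernel version): */
static struct ehci_qtd *ehci_qtd_alloc(struct ehci_hcd *ehci, gfp_t flags)
{
	struct ehci_qtd	*qtd;
	dma_addr_t	dma;

	/* dma_pool_alloc() hands back a 32-byte-aligned qTD together
	 * with its bus address, which the init routine records */
	qtd = dma_pool_alloc(ehci->qtd_pool, flags, &dma);
	if (qtd != NULL)
		ehci_qtd_init(ehci, qtd, dma);
	return qtd;
}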
void create_qtd_dummy(void)
{
	int n;
	struct ehci_qtd *qtd, *qtd_next;

	qtd = qtd_dummy_first;

	for (n = 0; ; n++) {
		/* the next qTD sits just past this one, rounded up to the
		 * 32-byte boundary the EHCI spec requires */
		qtd_next = (struct ehci_qtd *)
			((((u32) qtd) + sizeof(struct ehci_qtd) + 31) & ~31);

		ehci_qtd_init(qtd);
		//qtd_fill(qtd, 0, 0, QTD_STS_HALT, 0);

		if (n < 3) {
			/* chain the first four dummy qTDs together */
			qtd->hw_next = QTD_NEXT(qtd_next->qtd_dma);
			qtd->hw_alt_next = EHCI_LIST_END(); //QTD_NEXT(qtd_next->qtd_dma);
			ehci_dma_map_bidir((void *) qtd, sizeof(struct ehci_qtd));
		} else {
			ehci_dma_map_bidir(qtd, sizeof(struct ehci_qtd));
			break;
		}
		qtd = qtd_next;
	}
}
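/* The "(addr + size + 31) & ~31" expression above is the standard
 * align-up idiom: advance past the current descriptor, then round up
 * to the next 32-byte boundary (EHCI requires 32-byte-aligned qTDs and
 * QHs). A generic, equivalent helper (illustrative only, not part of
 * the driver): */
#define EHCI_ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
/* e.g. EHCI_ALIGN_UP(((u32) qtd) + sizeof(struct ehci_qtd), 32) */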
void reinit_ehci_headers(void)
{
	init_qh_and_qtd();
	create_qtd_dummy();

	ehci->async = qh_pointer[0];
	ehci->asyncqh = qh_pointer[1];
	in_qh = qh_pointer[2];
	out_qh = qh_pointer[3];
	dummy_qh = qh_pointer[4];

	/* async head: H bit set, halted, linked to the dummy QH */
	ehci_dma_unmap_bidir((dma_addr_t) ehci->async, sizeof(struct ehci_qh));
	ehci->async->ehci = ehci;
	ehci->async->qtd_head = NULL;
	ehci->async->qh_dma = ehci_virt_to_dma(ehci->async);
	ehci->async->hw_next = QH_NEXT(dummy_qh->qh_dma /* ehci->async->qh_dma */);
	ehci->async->hw_info1 = cpu_to_hc32(QH_HEAD);
	ehci->async->hw_info2 = cpu_to_hc32(0);
	ehci->async->hw_token = cpu_to_hc32(QTD_STS_HALT);
	ehci->async->hw_qtd_next = EHCI_LIST_END();
	ehci->async->hw_alt_next = EHCI_LIST_END(); //QTD_NEXT(get_qtd_dummy());
	ehci_dma_map_bidir(ehci->async, sizeof(struct ehci_qh));

	ehci_dma_unmap_bidir((dma_addr_t) ehci->asyncqh, sizeof(struct ehci_qh));
	ehci->asyncqh->ehci = ehci;
	ehci->asyncqh->qtd_head = NULL;
	ehci->asyncqh->qh_dma = ehci_virt_to_dma(ehci->asyncqh);
	/* flush this head back to memory for the controller, like the others */
	ehci_dma_map_bidir(ehci->asyncqh, sizeof(struct ehci_qh));

	ehci_dma_unmap_bidir((dma_addr_t) in_qh, sizeof(struct ehci_qh));
	in_qh->ehci = ehci;
	in_qh->qtd_head = NULL;
	in_qh->qh_dma = ehci_virt_to_dma(in_qh);
	ehci_dma_map_bidir(in_qh, sizeof(struct ehci_qh));

	ehci_dma_unmap_bidir((dma_addr_t) out_qh, sizeof(struct ehci_qh));
	out_qh->ehci = ehci;
	out_qh->qtd_head = NULL;
	out_qh->qh_dma = ehci_virt_to_dma(out_qh);
	ehci_dma_map_bidir(out_qh, sizeof(struct ehci_qh));
}
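/* Resulting async schedule after reinit (inferred from the hw_next
 * links written here and in init_qh_and_qtd()):
 *
 *   async (qh_pointer[0], H=1) -> dummy_qh (qh_pointer[4])
 *       -> qh_pointer[5] -> back to async
 *
 * asyncqh, in_qh and out_qh keep their software state but are bypassed
 * by the hardware traversal until a transfer links them into the ring. */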
int ehci_mem_init(void)
{
	int i;
	u32 ptr = 0x1800000 - DEFAULT_I_TDPS * sizeof(__le32);

	/* periodic frame list sits at the top of the reserved area */
	ehci->periodic = (u32 *) ptr;
	ehci->periodic_dma = ehci_virt_to_dma(ehci->periodic);
	for (i = 0; i < DEFAULT_I_TDPS; i++)
		ehci->periodic[i] = EHCI_LIST_END();
	ehci_writel(ehci->periodic_dma, &ehci->regs->frame_list);

	/* carve the qTDs downward from the frame list */
	for (i = 0; i < EHCI_MAX_QTD; i++) {
		ptr -= sizeof(struct ehci_qtd);
		ehci->qtds[i] = (struct ehci_qtd *) ptr;
	}
	ehci->qtd_used = 0;

	ptr -= sizeof(struct ehci_qh);
	ehci->asyncqh = (struct ehci_qh *) ptr;
	ehci->asyncqh->ehci = ehci;
	ehci->asyncqh->qh_dma = ehci_virt_to_dma(ehci->asyncqh);
	ehci->asyncqh->qtd_head = NULL;

	ptr -= sizeof(struct ehci_qh);
	ehci->async = (struct ehci_qh *) ptr;
	ehci->async->ehci = ehci;
	ehci->async->qh_dma = ehci_virt_to_dma(ehci->async);
	ehci->async->qtd_head = NULL;

	return 0;
}
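/* Resulting static layout (descending from 0x1800000; inferred from the
 * arithmetic above, and specific to this build's memory map):
 *
 *   0x1800000 - DEFAULT_I_TDPS*4 .. 0x1800000   periodic frame list
 *   below that                                  EHCI_MAX_QTD qTDs,
 *                                               then asyncqh, then async
 *
 * Note there is no explicit 32-byte rounding in this carve-out; it
 * relies on sizeof(struct ehci_qtd) and sizeof(struct ehci_qh) already
 * being multiples of 32. */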
/* remember to add cleanup code (above) if you add anything here */
static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
{
	int i;

	/* QTDs for control/bulk/intr transfers */
	ehci->qtd_pool = dma_pool_create ("ehci_qtd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_qtd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qtd_pool) {
		goto fail;
	}

	/* QHs for control/bulk/intr transfers */
	ehci->qh_pool = dma_pool_create ("ehci_qh",
			ehci_to_hcd(ehci)->self.controller,
			sizeof(struct ehci_qh_hw),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qh_pool) {
		goto fail;
	}
	ehci->async = ehci_qh_alloc (ehci, flags);
	if (!ehci->async) {
		goto fail;
	}

	/* ITD for high speed ISO transfers */
	ehci->itd_pool = dma_pool_create ("ehci_itd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_itd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->itd_pool) {
		goto fail;
	}

	/* SITD for full/low speed split ISO transfers */
	ehci->sitd_pool = dma_pool_create ("ehci_sitd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_sitd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->sitd_pool) {
		goto fail;
	}

	/* Hardware periodic table */
	ehci->periodic = (__le32 *)
		dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller,
			ehci->periodic_size * sizeof(__le32),
			&ehci->periodic_dma, 0);
	if (ehci->periodic == NULL) {
		goto fail;
	}
	for (i = 0; i < ehci->periodic_size; i++)
		ehci->periodic [i] = EHCI_LIST_END(ehci);

	/* software shadow of hardware table */
	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
	if (ehci->pshadow != NULL)
		return 0;

fail:
	ehci_dbg (ehci, "couldn't init memory\n");
	ehci_mem_cleanup (ehci);
	return -ENOMEM;
}
static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
{
	int i;

	/* QTDs for control/bulk/intr transfers */
	ehci->qtd_pool = dma_pool_create ("ehci_qtd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_qtd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qtd_pool) {
		goto fail;
	}

	/* QHs for control/bulk/intr transfers */
	ehci->qh_pool = dma_pool_create ("ehci_qh",
			ehci_to_hcd(ehci)->self.controller,
			sizeof(struct ehci_qh_hw),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qh_pool) {
		goto fail;
	}
	ehci->async = ehci_qh_alloc (ehci, flags);
	if (!ehci->async) {
		goto fail;
	}

	/* ITD for high speed ISO transfers */
	ehci->itd_pool = dma_pool_create ("ehci_itd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_itd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->itd_pool) {
		goto fail;
	}

	/* SITD for full/low speed split ISO transfers */
	ehci->sitd_pool = dma_pool_create ("ehci_sitd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_sitd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->sitd_pool) {
		goto fail;
	}

	/* Hardware periodic table */
	ehci->periodic = (__le32 *)
		dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller,
			ehci->periodic_size * sizeof(__le32),
			&ehci->periodic_dma, 0);
	if (ehci->periodic == NULL) {
		goto fail;
	}

	if (ehci->use_dummy_qh) {
		/* point every frame at an inactive dummy QH instead of a
		 * T-bit "null" entry, so controllers that prefetch the
		 * periodic schedule never fetch an empty link */
		struct ehci_qh_hw *hw;

		ehci->dummy = ehci_qh_alloc(ehci, flags);
		if (!ehci->dummy)
			goto fail;

		hw = ehci->dummy->hw;
		hw->hw_next = EHCI_LIST_END(ehci);
		hw->hw_qtd_next = EHCI_LIST_END(ehci);
		hw->hw_alt_next = EHCI_LIST_END(ehci);
		hw->hw_token &= ~QTD_STS_ACTIVE;
		ehci->dummy->hw = hw;

		for (i = 0; i < ehci->periodic_size; i++)
			ehci->periodic[i] = ehci->dummy->qh_dma;
	} else {
		for (i = 0; i < ehci->periodic_size; i++)
			ehci->periodic[i] = EHCI_LIST_END(ehci);
	}

	/* software shadow of hardware table */
	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
	if (ehci->pshadow != NULL)
		return 0;

fail:
	ehci_dbg (ehci, "couldn't init memory\n");
	ehci_mem_cleanup (ehci);
	return -ENOMEM;
}
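/* For reference, the link-pointer encodings used throughout these
 * tables (as in the kernel's ehci.h; paraphrased, not verbatim):
 *
 *   EHCI_LIST_END(ehci)   cpu_to_hc32(ehci, 1)     T-bit set: "null pointer"
 *   QH_NEXT(ehci, dma)    cpu_to_hc32(ehci, (((u32) dma) & ~0x01f) | Q_TYPE_QH)
 *
 * Bit 0 is the Terminate bit, bits 2:1 select the element type (ITD,
 * QH, SITD, FSTN), and the low five address bits are masked off since
 * descriptors are 32-byte aligned. The dummy-QH branch above exists
 * precisely so a prefetching controller never dereferences a T-bit
 * entry in the periodic list. */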
static unsigned short periodic_usecs (struct ehci_hcd *ehci,
		unsigned frame, unsigned uframe)
{
	__hc32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;
	struct ehci_qh_hw	*hw;

	while (q->ptr) {
		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
		case Q_TYPE_QH:
			hw = q->qh->hw;
			/* is it in the S-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &hw->hw_next;
			q = &q->qh->qh_next;
			break;
		/* case Q_TYPE_FSTN: */
		default:
			if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			if (q->itd->hw_transaction[uframe])
				usecs += q->itd->stream->usecs;
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask? */
			if (q->sitd->hw_uframe & cpu_to_hc32(ehci, 1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						cpu_to_hc32(ehci, 1 << 31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}
			/* ... or C-mask? */
			if (q->sitd->hw_uframe & cpu_to_hc32(ehci, 1 << (8 + uframe))) {
				usecs += q->sitd->stream->c_usecs;
			}
			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}
#ifdef DEBUG
	if (usecs > 100)
		ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}
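/* How this gets used: the scheduler asks, for each candidate (frame,
 * uframe, period), whether the bandwidth already claimed leaves room
 * for a new transfer. A simplified caller, modeled on the kernel's
 * check_period() in ehci-sched.c (uframe_has_room is a hypothetical
 * name for this sketch; the real code also handles period == 0 by
 * scanning every microframe of every frame): */
static int uframe_has_room(struct ehci_hcd *ehci, unsigned frame,
		unsigned uframe, unsigned period, unsigned usecs)
{
	/* at most ~100 of the 125 us in a microframe may go to periodic
	 * transfers (the 80% rule from the USB 2.0 spec), so convert
	 * "usecs we need" into "max already claimed" */
	unsigned limit = 100 - usecs;

	do {
		if (periodic_usecs(ehci, frame, uframe) > limit)
			return 0;	/* this slot is over budget */
	} while ((frame += period) < ehci->periodic_size);

	return 1;	/* the transfer fits at this period */
}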
/* remember to add cleanup code (above) if you add anything here */
static int ehci_mem_init(struct ehci_hcd *ehci, gfp_t flags)
{
	int i;

	g_usb_pool_count = 0;
	g_debug_qtd_allocated = 0;
	g_debug_qH_allocated = 0;
	g_alloc_map = 0;

	/* i.MX-specific: qTDs can live in on-chip IRAM, except on MX37 */
	if (cpu_is_mx37())
		use_iram_qtd = 0;
	else
		use_iram_qtd = 1;

	usb_pool_initialize(USB_IRAM_BASE_ADDR + IRAM_TD_SIZE * IRAM_NTD * 2,
			USB_IRAM_SIZE - IRAM_TD_SIZE * IRAM_NTD * 2, 32);

	if (!ehci->iram_buffer[0]) {
		ehci->iram_buffer[0] = alloc_iram_buf();
		ehci->iram_buffer_v[0] = IO_ADDRESS(ehci->iram_buffer[0]);
		ehci->iram_buffer[1] = alloc_iram_buf();
		ehci->iram_buffer_v[1] = IO_ADDRESS(ehci->iram_buffer[1]);
	}

	/* QTDs for control/bulk/intr transfers */
	ehci->qtd_pool = dma_pool_create("ehci_qtd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof(struct ehci_qtd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qtd_pool)
		goto fail;

	/* QHs for control/bulk/intr transfers */
	ehci->qh_pool = dma_pool_create("ehci_qh",
			ehci_to_hcd(ehci)->self.controller,
			sizeof(struct ehci_qh),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qh_pool)
		goto fail;
	ehci->async = ehci_qh_alloc(ehci, flags);
	if (!ehci->async)
		goto fail;

	/* ITD for high speed ISO transfers */
	ehci->itd_pool = dma_pool_create("ehci_itd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof(struct ehci_itd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->itd_pool)
		goto fail;

	/* SITD for full/low speed split ISO transfers */
	ehci->sitd_pool = dma_pool_create("ehci_sitd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof(struct ehci_sitd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->sitd_pool)
		goto fail;

	/* Hardware periodic table */
	ehci->periodic = (__le32 *)
		dma_alloc_coherent(ehci_to_hcd(ehci)->self.controller,
			ehci->periodic_size * sizeof(__le32),
			&ehci->periodic_dma, 0);
	if (ehci->periodic == NULL)
		goto fail;
	for (i = 0; i < ehci->periodic_size; i++)
		ehci->periodic[i] = EHCI_LIST_END(ehci);

	/* software shadow of hardware table */
	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
	if (ehci->pshadow != NULL)
		return 0;

fail:
	ehci_dbg(ehci, "couldn't init memory\n");
	ehci_mem_cleanup(ehci);
	return -ENOMEM;
}
int tiny_ehci_init(void)
{
	int i;

	ehci = &_ehci;

	if (usb_os_init() < 0)
		return -1;

	if (1) {
		// From Hermes: the ehci state is read from /dev/mload (ehci init happens there)
		/*
		int fd;
		fd = os_open("/dev/mload", 1);
		if (fd < 0)
			return -1;
		ehci = (struct ehci_hcd *) os_ioctlv(fd, MLOAD_GET_EHCI_DATA, 0, 0, 0);
		os_close(fd);
		*/
		ehci = swi_mload_EHCI_data();

		// stop the EHCI controller and wait for the Run/Stop bit to clear
		ehci_writel(0x00010020, &ehci->regs->command);
		do {
			if (!(ehci_readl(&ehci->regs->command) & 1))
				break;
		} while (1);

		ehci_dma_map_bidir(ehci, sizeof(struct ehci_hcd));

		for (i = 0; i < DEFAULT_I_TDPS; i++) {
			ehci->periodic[i] = EHCI_LIST_END();
			/* flush the table entry itself to memory */
			ehci_dma_map_bidir((void *) &ehci->periodic[i], 4);
		}

		reinit_ehci_headers();

		/* WARNING: this ignores port 1 (external) and ports 2,3
		 * (internal) for USB 2.0 operation. From cIOS mload 1.6 on,
		 * port 1 is forced to USB 1.1; only port 0 can work as USB 2.0. */
		ehci->num_port = 1;

		//ehci_writel(0x00080021, &ehci->regs->command);
		//ehci_writel(0, &ehci->regs->frame_list);

		ehci_writel(ehci->async->qh_dma, &ehci->regs->async_next);
		ehci_writel(/*INTR_MASK*/ STS_PCD, &ehci->regs->intr_enable);

#define t125us (1)
		ehci_writel((t125us << 16) | 0x0021, &ehci->regs->command);
		ehci_readl(&ehci->regs->command);

		//swi_mload_led_on();
		//swi_mload_call_func(hola, NULL, NULL);
	}
	return 0;
}
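/* The final USBCMD write above packs three fields (a breakdown per the
 * EHCI specification's USBCMD register layout; t125us is a macro local
 * to this file):
 *
 *   (t125us << 16)   Interrupt Threshold Control = 1 micro-frame (125 us)
 *   0x0020           Asynchronous Schedule Enable
 *   0x0001           Run/Stop = run
 *
 * The ehci_readl() that follows just forces the posted MMIO write out
 * before continuing. */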
void init_qh_and_qtd(void)
{
	int n;
	struct ehci_qtd *qtd;
	struct ehci_qh *qh;

	if (!qh_header) {
		//u32 mem = (u32) USB_Alloc(4096 * 3);
		//mem = (mem + 4095) & ~4095;
		qh_header = (struct ehci_qh *) ehci->async; //mem;
		qtd_header = (struct ehci_qtd *) ehci->qtds[0];
	}

	qtd = qtd_header; //= (struct ehci_qtd *) (((u32) qh_header) + 4096);

	/* first bank: the working qTDs */
	for (n = 0; n < EHCI_MAX_QTD; n++) {
		ehci->qtds[n] = qtd;
		memset((void *) ehci->qtds[n], 0, sizeof(struct ehci_qtd));
		ehci_dma_map_bidir((void *) ehci->qtds[n], sizeof(struct ehci_qtd));
		qtd = (struct ehci_qtd *)
			((((u32) qtd) + sizeof(struct ehci_qtd) + 31) & ~31);
	}

	/* second bank: cleared and flushed, later used for the dummy chain */
	for (n = 0; n < EHCI_MAX_QTD; n++) {
		memset((void *) qtd, 0, sizeof(struct ehci_qtd));
		ehci_dma_map_bidir((void *) qtd, sizeof(struct ehci_qtd));
		qtd = (struct ehci_qtd *)
			((((u32) qtd) + sizeof(struct ehci_qtd) + 31) & ~31);
	}
	qtd_dummy_first = qtd;

	/* six QHs, each halted and linked to the next; note the H (head)
	 * bit is set on every QH except the first here, and
	 * reinit_ehci_headers() then re-marks qh_pointer[0] as the head */
	qh = qh_header;
	for (n = 0; n < 6; n++) {
		qh_pointer[n] = qh;
		memset((void *) qh_pointer[n], 0, sizeof(struct ehci_qh));
		qh->qh_dma = ehci_virt_to_dma(qh);
		qh_pointer[n]->hw_info1 = cpu_to_hc32(QH_HEAD * (n != 0));
		qh_pointer[n]->hw_info2 = cpu_to_hc32(0);
		qh_pointer[n]->hw_token = cpu_to_hc32(QTD_STS_HALT);
		qh = (struct ehci_qh *)
			((((u32) qh) + sizeof(struct ehci_qh) + 31) & ~31);
		qh_pointer[n]->hw_next = QH_NEXT(ehci_virt_to_dma(qh));
		qh_pointer[n]->hw_qtd_next = EHCI_LIST_END();
		qh_pointer[n]->hw_alt_next = EHCI_LIST_END();
		ehci_dma_map_bidir((void *) qh_pointer[n], sizeof(struct ehci_qh));
	}

	/* close the ring: the last QH links back to the first */
	n--;
	qh_pointer[n]->hw_next = QH_NEXT(ehci_virt_to_dma(qh_header));
	ehci_dma_map_bidir((void *) qh_pointer[n], sizeof(struct ehci_qh));
}
/* one-time init, only for memory state */
static int ehci_init(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	u32			temp;
	int			retval;
	u32			hcc_params;

	spin_lock_init(&ehci->lock);

	init_timer(&ehci->watchdog);
	ehci->watchdog.function = ehci_watchdog;
	ehci->watchdog.data = (unsigned long) ehci;

	init_timer(&ehci->iaa_watchdog);
	ehci->iaa_watchdog.function = ehci_iaa_watchdog;
	ehci->iaa_watchdog.data = (unsigned long) ehci;

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	ehci->periodic_size = DEFAULT_I_TDPS;
	INIT_LIST_HEAD(&ehci->cached_itd_list);
	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
		ehci->i_thresh = 8;
	else					// N microframes cached
		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	ehci->reclaim = NULL;
	ehci->next_uframe = -1;
	ehci->clock_frame = -1;

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	ehci->async->qh_next.qh = NULL;
	ehci->async->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
	ehci->async->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
	ehci->async->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
	ehci->async->hw_qtd_next = EHCI_LIST_END(ehci);
	ehci->async->qh_state = QH_STATE_LINKED;
	ehci->async->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * make problems: throughput reduction (!), data errors...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		ehci_dbg(ehci, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
		switch (EHCI_TUNE_FLS) {
		case 0: ehci->periodic_size = 1024; break;
		case 1: ehci->periodic_size = 512; break;
		case 2: ehci->periodic_size = 256; break;
		default: BUG();
		}
	}
	ehci->command = temp;

	return 0;
}
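/* A note on the irq-latency math above: in USBCMD, bits 23:16 hold the
 * Interrupt Threshold Control value in micro-frames, which must be a
 * power of two from 1 to 64. Hence:
 *
 *   temp = 1 << (16 + log2_irq_thresh);
 *
 *   log2_irq_thresh = 0  ->  ITC = 1 uframe  (125 us max irq latency)
 *   log2_irq_thresh = 6  ->  ITC = 64 uframes (8 ms max irq latency)
 *
 * which is why the module parameter is clamped to the 0..6 range. */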