/*!
 * mxc_hrt_start_timer() - start a timer for otg state machine
 *
 * Set or reset the single shared high-resolution timer (hr_timer) to fire
 * in @usec micro-seconds.  A @usec of 0 cancels any pending timer and
 * returns without re-arming.  Durations >= 1 second are programmed in whole
 * jiffies ("LONG" path); shorter durations are programmed in architecture
 * cycles on top of the current jiffy ("SHORT" path) and are clamped to a
 * 100us minimum.
 *
 * XXX There may be a floor or minimum that can be effectively set.
 * XXX We have seen an occasional problem with US(25) for discharge for example.
 *
 * NOTE(review): del_timer() (not del_timer_sync()) is used, so on SMP the
 * callback may still be running when the timer is re-armed — presumably
 * serialized by the caller / OTG state machine; verify.
 *
 * @param otg - otg instance (unused here; kept for the ocd_ops signature)
 * @param usec - timeout in micro-seconds, 0 to cancel
 * @return 0 always (RETURN_ZERO_UNLESS exits early when usec == 0).
 */
int mxc_hrt_start_timer(struct otg_instance *otg, int usec)
{
    TRACE_MSG1(OCD, "usec: %d", usec);
    mxc_hr_usec_set = usec;

    /* mark inactive and cancel any pending expiry before re-programming */
    mxc_hr_active = FALSE;
    TRACE_MSG1(OCD, "resetting active: %d", mxc_hr_active);
    del_timer(&hr_timer);

    /* usec == 0 means "stop timer" - done */
    RETURN_ZERO_UNLESS(usec);

    mxc_hr_active = TRUE;
    TRACE_MSG1(OCD, "setting active: %d", mxc_hr_active);

    if (mxc_hr_usec_set >= 1000000) {
        /* LONG path: one second or more - whole-jiffy resolution is enough.
         * Note integer division truncates sub-second remainder. */
        hr_timer.expires = jiffies + ((mxc_hr_usec_set/1000000)*mxc_hr_jiffy_per_sec);
        hr_timer.arch_cycle_expires = get_arch_cycles(jiffies);
        TRACE_MSG4 (OCD, "usec: %u jiffies: %8u expires: %8u arch_cycle_expires: %8u LONG",
                usec, jiffies, hr_timer.expires, hr_timer.arch_cycle_expires);
    }
    else {
        /* SHORT path: sub-second - program arch cycles on top of "now" */
        hr_timer.expires = jiffies;
        hr_timer.arch_cycle_expires = get_arch_cycles(jiffies);

        /* clamp to the empirical 100us minimum (see XXX above) */
        if (mxc_hr_usec_set < 100) {
            TRACE_MSG1(OCD, "usec: %d set to minimum 100", mxc_hr_usec_set);
            mxc_hr_usec_set = 100;
        }

        hr_timer.arch_cycle_expires += nsec_to_arch_cycle(mxc_hr_usec_set * 1000);
        TRACE_MSG2(OCD, "arch_cycle_expires: %d arch_cycles_per_jiffy: %d",
                hr_timer.arch_cycle_expires, arch_cycles_per_jiffy);

        /* normalize: carry whole jiffies out of the arch-cycle remainder */
        while (hr_timer.arch_cycle_expires >= arch_cycles_per_jiffy) {
            hr_timer.expires++;
            hr_timer.arch_cycle_expires -= arch_cycles_per_jiffy;
        }
        TRACE_MSG4 (OCD, "usec: %u jiffies: %8u expires: %8u arch_cycle_expires: %8u SHORT",
                usec, jiffies, hr_timer.expires, hr_timer.arch_cycle_expires);
    }
    add_timer(&hr_timer);
    return 0;
}
/*! blan_start_recv_endpoint - (re)fill an OUT endpoint's pool of receive urbs
 *
 * Allocate and start receive urbs on @endpoint_index until the number of
 * outstanding urbs reaches npd->max_recv_urbs.  Each urb is paired with an
 * OS buffer from net_os_alloc_buffer() (attached via urb->function_privdata
 * and urb->buffer) and completes through net_fd_recv_urb2(), which
 * decrements @recv_urbs_started.
 *
 * @param function_instance - function instance owning the endpoint
 * @param endpoint_index - endpoint to start receive urbs on
 * @param recv_urbs_started - counter of urbs currently outstanding on this endpoint
 * @param msg - caller-supplied tag for trace output
 * @return 0 always.
 */
int blan_start_recv_endpoint(struct usbd_function_instance *function_instance, int endpoint_index,
        otg_atomic_t *recv_urbs_started, char *msg)
{
    struct usb_network_private *npd = function_instance->privdata;
    int hs = usbd_high_speed(function_instance);
    int alloc_length = usbd_endpoint_transferSize(function_instance, endpoint_index, hs);

    if (TRACE_VERBOSE)
        TRACE_MSG4(NTT, "endpoint_index: %d recv_urbs_started: %d alloc_length: %d %s",
                endpoint_index, otg_atomic_read(recv_urbs_started), alloc_length, msg);

    while (otg_atomic_read(recv_urbs_started) < npd->max_recv_urbs) {
        u8 *os_buffer = NULL;
        void *os_data = NULL;
        struct usbd_urb *urb;
#ifdef DEBUG_CRC_SEEN
        if (npd->seen_crc_error) {
            TRACE_MSG0(NTT, "CRC ERROR NOT RESTARTING");
            break;
        }
#endif /* DEBUG_CRC_SEEN */
        /* get os buffer - os_buffer is the data area, os_data the os structure */
        os_data = net_os_alloc_buffer(function_instance, &os_buffer, alloc_length);

        /* allocate urb without a buffer; the os_buffer is attached below */
        urb = usbd_alloc_urb(function_instance, endpoint_index, 0, net_fd_recv_urb2);

        if (os_buffer && urb) {
            urb->function_privdata = os_data;
            urb->buffer = os_buffer;
            urb->flags |= npd->recv_urb_flags;
            urb->alloc_length = urb->buffer_length = alloc_length;
            if (usbd_start_out_urb(urb)) {
                /* start failed: release buffer and urb and stop - do NOT
                 * count it.  (The previous code incremented
                 * recv_urbs_started even on failure, permanently
                 * over-counting because the completion callback that
                 * decrements it never runs for a urb that never started.) */
                net_os_dealloc_buffer(function_instance, os_data, os_buffer);
                urb->function_privdata = NULL;
                urb->buffer = NULL;
                usbd_free_urb(urb);
                break;
            }
            otg_atomic_inc(recv_urbs_started);
            continue;
        }

        /* allocation failed - free whichever of the two resources we got */
        TRACE_MSG1(NTT, "recv_urbs_started: %d FAILED EARLY", otg_atomic_read(recv_urbs_started));
        if (os_buffer || os_data)
            net_os_dealloc_buffer(function_instance, os_data, os_buffer);
        if (urb) {
            /* braces matter here: the previous code's usbd_free_urb() sat
             * outside the if body and was reached with urb == NULL */
            urb->buffer = NULL;
            usbd_free_urb(urb);
        }
        break;
    }
    if (TRACE_VERBOSE)
        TRACE_MSG1(NTT, "recv_urbs_started: %d", otg_atomic_read(recv_urbs_started));
    return 0;
}
/*! net_fd_recv_urb2 - callback to process a received URB
 *
 * Completion callback installed by blan_start_recv_endpoint().  Accounts
 * for the finished urb (decrements recv_urbs_started), hands the data to
 * the network layer when the urb completed OK, releases the OS buffer when
 * the urb was cancelled, then detaches and frees the urb.  Finally restarts
 * reception via blan_start_recv() while the device is still configured.
 *
 * NOTE(review): only USBD_URB_OK and USBD_URB_CANCELLED are handled; if any
 * other status value can reach this callback, the os_data buffer in
 * urb->function_privdata is neither consumed nor freed before being nulled
 * below — potential leak, verify against the urb status set.
 *
 * @param urb - pointer to the completed urb
 * @param rc - receiving urb result code (forwarded to npd->net_recv_urb)
 * @return 0 always.
 */
int net_fd_recv_urb2(struct usbd_urb *urb, int rc)
{
    /* save function_instance before urb->function_instance is cleared below */
    struct usbd_function_instance *function_instance = urb->function_instance;
    struct usb_network_private *npd = function_instance->privdata;
    int hs = usbd_high_speed(function_instance);
    int endpoint_index = urb->endpoint_index;
#ifndef CONFIG_OTG_NETWORK_DOUBLE_OUT
    int recv_index = 0;
#else /* CONFIG_OTG_NETWORK_DOUBLE_OUT */
    /* two OUT endpoints: map endpoint to its per-endpoint urb counter */
    int recv_index = (endpoint_index == BULK_OUT_A) ? 0 : 1;
#endif /* CONFIG_OTG_NETWORK_DOUBLE_OUT */
    /* NOTE(review): alloc_length, status, os_data and os_buffer are unused
     * below - candidates for removal */
    int alloc_length = usbd_endpoint_transferSize(function_instance, endpoint_index, hs);
    int status = urb->status;
    void *os_data;
    u8 *os_buffer;

    if (TRACE_VERY_VERBOSE) {
        TRACE_MSG4(NTT, "status: %d actual_length: %d bus status: %d device_state: %d",
                urb->status, urb->actual_length,
                usbd_get_device_status(function_instance),
                usbd_get_device_state(function_instance) );
        TRACE_NRECV(NTT, 32, urb->buffer);
        TRACE_MSG0(NTT, "--");
        TRACE_RECV(NTT, urb->actual_length, urb->buffer);
    }

    /* this urb is no longer outstanding on the endpoint */
    otg_atomic_dec(&npd->recv_urbs_started[recv_index]);

    /* process the data */
    if (urb->status == USBD_URB_OK)
        npd->net_recv_urb(urb, rc);

    /* cancelled: the buffer was never consumed, give it back to the os */
    if (urb->status == USBD_URB_CANCELLED)
        net_os_dealloc_buffer(function_instance, urb->function_privdata, urb->buffer);

    /* disconnect os_data buffer from urb before freeing it */
    urb->function_privdata = NULL;
    urb->buffer = NULL;
    urb->function_instance = NULL;
    urb->status = USBD_URB_OK;
    usbd_free_urb(urb);

    /* keep the receive pipeline full while the device is usable */
    if ((USBD_OK == usbd_get_device_status(function_instance))
            && (STATE_CONFIGURED == usbd_get_device_state(function_instance))) {
        blan_start_recv(function_instance);
    }
    else {
        TRACE_MSG0(NTT, "NOT RESTARTING");
    }
    return 0;
}
/*! pmic_otg_event_bh - pmic otg event handler (bottom half)
 *
 * Reads the PMIC sensor bits, maps them onto OTG state-machine input flags
 * and forwards them via otg_event().
 *
 * NOTE(review): "force" is a function-static initialized TRUE and never
 * written again in this function (and, being static-local, nowhere else),
 * so RETURN_UNLESS(force || ...) always passes and the inputs_saved
 * change-detection below is effectively inert.  Possibly intentional
 * (always forward events) — confirm before "fixing".
 *
 * NOTE(review): the otg parameter derived from data is unused; the event is
 * posted to REMOVE_tcd_instance->otg instead — verify they are the same.
 *
 * @param data - otg instance (cast from the work/bh argument)
 */
void pmic_otg_event_bh(void *data)
{
    struct otg_instance *otg = (struct otg_instance *) data;
    otg_current_t inputs;
    t_sensor_bits sense_bits;
    static BOOL force = TRUE;
    static otg_current_t inputs_saved = 0;

    /* snapshot all sensor bits in one call; bail out on failure */
    if (pmic_get_sensors(&sense_bits)) {
        printk(KERN_INFO "%s: pmic_get_sensors() failed\n", __FUNCTION__);
        return;
    }

    /* trace: ' ' = bit set, '/' = bit clear */
    TRACE_MSG6(REMOVE_TCD, "usb4v4s%c usb2v0s%c usb0v8s:%c id_gnds%c id_floats%c id_se1s%c",
            sense_bits.sense_usb4v4s ? ' ' : '/',
            sense_bits.sense_usb2v0s ? ' ' : '/',
            sense_bits.sense_usb0v8s ? ' ' : '/',
            sense_bits.sense_id_gnds ? ' ' : '/',
            sense_bits.sense_id_floats ? ' ' : '/',
            sense_bits.sense_se1s ? ' ' : '/');

    /* translate sensor levels into paired OTG input flags (X vs X_).
     * Note usb0v8s is inverted: above 0.8V means the session has NOT ended. */
    inputs = (sense_bits.sense_usb4v4s ? VBUS_VLD : VBUS_VLD_) |
            (sense_bits.sense_usb2v0s ? (B_SESS_VLD | A_SESS_VLD) : (B_SESS_VLD_ | A_SESS_VLD_)) |
            (sense_bits.sense_usb0v8s ? B_SESS_END_ : B_SESS_END) |
            (sense_bits.sense_id_gnds ? ID_GND : ID_GND_) |
            (sense_bits.sense_id_floats ? ID_FLOAT : ID_FLOAT_) |
            (sense_bits.sense_se1s ? SE1_DET : SE1_DET_) |
            (det_dp_hi ? DP_HIGH : DP_HIGH_) |
            (det_dm_hi ? DM_HIGH : DM_HIGH_);

    TRACE_MSG4(REMOVE_TCD, "MC13783 EVENT: sense_bits: %8x otg inputs: %8x saved: %x diff: %x",
            sense_bits.sense_se1s, inputs, inputs_saved, inputs ^ inputs_saved);

    /* suppress duplicate events unless forced (see NOTE above: force is
     * currently always TRUE) */
    RETURN_UNLESS(force || (inputs ^ inputs_saved));
    inputs_saved = inputs;
    otg_event(REMOVE_tcd_instance->otg, inputs, REMOVE_TCD, "PMIC OTG EVENT");

    // gpio_config_int_en(2, 17, TRUE);
    // gpio_config_int_en(2, 16, TRUE);
    // gpio_clear_int (2, 17);
    // gpio_clear_int (2, 16);
}
/*!
 * mxc_hrt_mod_init() - initial high-resolution timer setup
 *
 * Initializes the shared hr_timer (callback mxc_hrt_callback, initial
 * expiry computed but NOT armed - add_timer is commented out) and derives
 * mxc_hr_jiffy_per_sec from the architecture cycle constants for use by
 * mxc_hrt_start_timer().
 *
 * NOTE(review): no THROW() targets the CATCH(error) block below, so the
 * -EINVAL path appears unreachable as written - confirm against the
 * THROW/CATCH macro definitions.
 *
 * @return 0 on success, -EINVAL on (currently unreachable) error.
 */
int mxc_hrt_mod_init (void)
{
    int res = 0;

#if 0
    /* test timer for 10 sec */
    init_timer (&hr_timer);
    hr_timer.expires = jiffies + 630;
    hr_timer.function = mxc_hrt_callback;
    add_timer(&hr_timer);
#endif
#if 1
    /* prepare the timer with a 100ms offset normalized into
     * jiffies + arch cycles; left un-armed (add_timer commented out) */
    init_timer (&hr_timer);
    hr_timer.expires = jiffies + 10;
    hr_timer.function = mxc_hrt_callback;
    hr_timer.arch_cycle_expires = get_arch_cycles(jiffies);
    hr_timer.arch_cycle_expires += nsec_to_arch_cycle(100 * 1000 * 1000);
    while (hr_timer.arch_cycle_expires >= arch_cycles_per_jiffy) {
        hr_timer.expires++;
        hr_timer.arch_cycle_expires -= arch_cycles_per_jiffy;
    }
    // add_timer(&hr_timer);
#endif

    /* NOTE(review): both assignments to res below are dead stores kept only
     * for the commented-out diagnostics */
    res = nsec_to_arch_cycle(100000);
    //printk(KERN_INFO"arch cycles for 100usec: %8X\n", res);
    res = arch_cycles_per_jiffy;

    /* jiffies per second = cycles per second / cycles per jiffy */
    mxc_hr_jiffy_per_sec = (nsec_to_arch_cycle(1000000000)/arch_cycles_per_jiffy);

    TRACE_MSG4(OCD, "arch cycles per jiffy: %8u Number of jiffy for 1 sec is %8u resolution: %8d nsec/cycle: %8u\n",
            arch_cycles_per_jiffy, mxc_hr_jiffy_per_sec, hr_time_resolution, nsec_to_arch_cycle(1) );

    CATCH(error) {
        return -EINVAL;
    }
    return 0;
}
/*! arc_read_rcv_buffer * * Recover number of bytes DMA'd to receive buffer, sync. */ int arc_read_rcv_buffer (struct pcd_instance *pcd, struct usbd_endpoint_instance *endpoint) { struct arc_private_struct *privdata = endpoint->privdata; struct ep_td_struct *curr_td = privdata->cur_dtd; struct ep_queue_head *qh = privdata->cur_dqh; struct usbd_urb *rx_urb = endpoint->rcv_urb; /* sync qh and td structures, note that urb-buffer was invalidated in arc_add_buffer_to_dtd() */ dma_cache_maint(qh, sizeof(struct ep_queue_head), DMA_FROM_DEVICE); dma_cache_maint(curr_td, sizeof(struct ep_td_struct), DMA_FROM_DEVICE); if (rx_urb) { int length = rx_urb->buffer_length - ((le32_to_cpu(curr_td->size_ioc_sts) & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS); if (TRACE_VERBOSE) TRACE_MSG4(pcd->TAG, "buffer_length: %d alloc_length: %d Len: %d (%x)" , rx_urb->buffer_length, rx_urb->alloc_length, length, le32_to_cpu(curr_td->size_ioc_sts)); return length; } TRACE_MSG1(pcd->TAG, "NO RCV URB (%x)" , le32_to_cpu(curr_td->size_ioc_sts)); return 0; }
void mc13783_otg_event_bh (void *arg) { u64 inputs; t_sense_bits sense_bits; static BOOL force = TRUE; static u64 inputs_saved = 0; // Note: power_ic has USB4V4S labelled as USBI, which is incorrect // Get the sense bits, return if any fail to be read. if ( (sense_bits.sense_usb4v4s = power_ic_event_sense_read(POWER_IC_EVENT_ATLAS_USBI)) < 0) { printk(KERN_INFO"%s: mc13783_get_sense() usb4v4s failed\n", __FUNCTION__); return; } if ( (sense_bits.sense_usb2v0s = power_ic_event_sense_read(POWER_IC_EVENT_ATLAS_USB2V0S)) < 0) { printk(KERN_INFO"%s: mc13783_get_sense() usb2v0s failed\n", __FUNCTION__); return; } if ( (sense_bits.sense_usb0v8s = power_ic_event_sense_read(POWER_IC_EVENT_ATLAS_USB0V8S)) < 0) { printk(KERN_INFO"%s: mc13783_get_sense() usb0v8s failed\n", __FUNCTION__); return; } if ( (sense_bits.sense_id_floats = power_ic_event_sense_read(POWER_IC_EVENT_ATLAS_ID_FLOAT)) < 0) { printk(KERN_INFO"%s: mc13783_get_sense() id_floats failed\n", __FUNCTION__); return; } if ( (sense_bits.sense_id_gnds = power_ic_event_sense_read(POWER_IC_EVENT_ATLAS_ID_GROUND)) < 0) { printk(KERN_INFO"%s: mc13783_get_sense() id_gnds failed\n", __FUNCTION__); return; } if ( (sense_bits.sense_se1s = power_ic_event_sense_read(POWER_IC_EVENT_ATLAS_SE1I)) < 0) { printk(KERN_INFO"%s: mc13783_get_sense() se1s failed\n", __FUNCTION__); return; } // Factory cable check meant for USB B device only. If IDGNDS is True // adjust the value of IDGNDS to be False, so the state machine thinks it's a // traditional device and not a Dual role device. #ifdef CONFIG_OTG_USB_PERIPHERAL if ( sense_bits.sense_id_gnds ) { sense_bits.sense_id_gnds = FALSE; TRACE_MSG0(TCD, "Factory Cable detected and IDGND modified to false"); // printk("Factory Cable detected and IDGND modified to false\n"); } #endif inputs = (sense_bits.sense_usb4v4s ? VBUS_VLD : VBUS_VLD_) | (sense_bits.sense_usb2v0s ? (B_SESS_VLD | A_SESS_VLD) : (B_SESS_VLD_ | A_SESS_VLD_)) | (sense_bits.sense_usb0v8s ? 
B_SESS_END_ : B_SESS_END) | (sense_bits.sense_id_gnds ? ID_GND : ID_GND_) | (sense_bits.sense_id_floats ? ID_FLOAT : ID_FLOAT_) | (sense_bits.sense_se1s ? SE1_DET : SE1_DET_) | (det_dp_hi ? DP_HIGH : DP_HIGH_) | (det_dm_hi ? DM_HIGH : DM_HIGH_); TRACE_MSG4(TCD, "MC13783 EVENT: sense_bits: %8x otg inputs: %8x saved: %x diff: %x", sense_bits.sense_se1s, inputs, inputs_saved, inputs ^ inputs_saved); RETURN_UNLESS(force || (inputs ^ inputs_saved)); inputs_saved = inputs; otg_event(tcd_instance->otg, inputs, TCD, "MC13783 OTG EVENT"); }
/*! * generic_cf_modinit() - module init * * This is called by the Linux kernel; either when the module is loaded * if compiled as a module, or during the system intialization if the * driver is linked into the kernel. * * This function will parse module parameters if required and then register * the generic driver with the USB Device software. * */ static int generic_cf_modinit (void) { int i; #if !defined(OTG_C99) //generic_cf_global_init(); #endif /* defined(OTG_C99) */ GENERIC = otg_trace_obtain_tag(); i = MODPARM(idVendor); printk (KERN_INFO "Model ID is %s",MODPARM(iProduct)); #if 0 TRACE_MSG4(GENERIC, "config_name: \"%s\" load_all: %d class_name: \"%s\" interface_names: \"%s\"", MODPARM(config_name) ? MODPARM(config_name) : "", MODPARM(load_all), MODPARM(class_name) ? MODPARM(class_name) : "", MODPARM(interface_names) ? MODPARM(interface_names) : ""); #else TRACE_MSG5(GENERIC, "config_name: \"%s\" load_all: %d class_name: \"%s\" interface_names: \"%s\" Serial: \"%s\"", generic_config_name(), MODPARM(load_all), MODPARM(class_name) ? MODPARM(class_name) : "", MODPARM(interface_names) ? MODPARM(interface_names) : "", MODPARM(iSerialNumber) ? MODPARM(iSerialNumber) : ""); #endif /* load config or configs */ if (preset_config_name() || MODPARM(load_all)) { if (preset_config_name()){ MODPARM(load_all) = 0; } printk (KERN_INFO "%s: config_name: \"%s\" load_all: %d\n", __FUNCTION__, generic_config_name() , MODPARM(load_all)); /* search for named config */ for (i = 0; ; i++) { struct generic_config *config = generic_configs + i; BREAK_UNLESS(config->interface_names); printk(KERN_INFO"%s: checking[%d] \"%s\"\n", __FUNCTION__, i, config->composite_driver.driver.name); if (MODPARM(iSerialNumber) && strlen(MODPARM(iSerialNumber)) && /* For the moment, we will only use serial number for msc and mtp. I suggest we come up with a more generic way to determine if a function driver needs to use the serial number. (for instance another function member. 
*/ ( ( !strcmp(config->composite_driver.driver.name, "mtp")) || !strcmp(config->composite_driver.driver.name, "msc")) ){ config->device_description.iSerialNumber = MODPARM(iSerialNumber); } config->device_description.iProduct = MODPARM(iProduct); generic_cf_register(config, generic_config_name()); //printk(KERN_INFO"%s: loaded %s\n", __FUNCTION__, config->composite_driver.driver.name); } } else { struct generic_config *config = &generic_config; //printk (KERN_INFO "%s: idVendor: %04x idProduct: %04x\n", __FUNCTION__, MODPARM(idVendor), MODPARM(idProduct)); //printk (KERN_INFO "%s: class_name: \"%s\" _interface_names: \"%s\"\n", // __FUNCTION__, MODPARM(class_name), MODPARM(interface_names)); if (MODPARM(driver_name) && strlen(MODPARM(driver_name))) config->composite_driver.driver.name = MODPARM(driver_name); if (MODPARM(class_name) && strlen(MODPARM(class_name))) config->class_name = MODPARM(class_name); if (MODPARM(interface_names) && strlen(MODPARM(interface_names))) config->interface_names = MODPARM(interface_names); if (MODPARM(iConfiguration) && strlen(MODPARM(iConfiguration))) config->configuration_description.iConfiguration = MODPARM(iConfiguration); if (MODPARM(bDeviceClass)) config->device_description.bDeviceClass = MODPARM(bDeviceClass); if (MODPARM(bDeviceSubClass)) config->device_description.bDeviceSubClass = MODPARM(bDeviceSubClass); if (MODPARM(bDeviceProtocol)) config->device_description.bDeviceProtocol = MODPARM(bDeviceProtocol); if (MODPARM(idVendor)) config->device_description.idVendor = MODPARM(idVendor); else config->device_description.idVendor = CONFIG_OTG_GENERIC_VENDORID; if (MODPARM(idProduct)) config->device_description.idProduct = MODPARM(idProduct); else config->device_description.idProduct = CONFIG_OTG_GENERIC_PRODUCTID; if (MODPARM(bcdDevice)) config->device_description.bcdDevice = MODPARM(bcdDevice); else config->device_description.bcdDevice = CONFIG_OTG_GENERIC_BCDDEVICE; if (MODPARM(iManufacturer) && strlen(MODPARM(iManufacturer))) 
config->device_description.iManufacturer = MODPARM(iManufacturer); else config->device_description.iManufacturer = CONFIG_OTG_GENERIC_MANUFACTURER; if (MODPARM(iProduct) && strlen(MODPARM(iProduct))) config->device_description.iProduct = MODPARM(iProduct); else config->device_description.iProduct = CONFIG_OTG_GENERIC_PRODUCT_NAME; if (MODPARM(iSerialNumber) && strlen(MODPARM(iSerialNumber))){ config->device_description.iSerialNumber = MODPARM(iSerialNumber); } if (MODPARM(interface_names)) config->interface_names = MODPARM(interface_names); generic_cf_register(config, NULL); } return 0; }
/*! arc_add_buffer_to_dtd * * C.f. 39.16.5.3 - case 1: Link list is empty */ static void arc_add_buffer_to_dtd (struct pcd_instance *pcd, struct usbd_endpoint_instance *endpoint, int dir, int len, int offset) { struct otg_instance *otg = pcd->otg; struct usbd_urb *urb = endpoint->active_urb; struct arc_private_struct *privdata = endpoint->privdata; u8 hs = pcd->bus->high_speed; u8 physicalEndpoint = endpoint->physicalEndpoint[hs]; u8 bEndpointAddress = endpoint->bEndpointAddress[hs]; u8 epnum = bEndpointAddress & 0x3f; u16 wMaxPacketSize = endpoint->wMaxPacketSize[hs]; struct ep_queue_head *dQH = &udc_controller->ep_qh[2 * epnum + dir]; struct ep_td_struct *dtd = &(udc_controller->ep_dtd[2 * epnum + dir]); u32 mask = 0; int timeout1 = 0; int timeout2 = 0; u32 endptstat = -1; u32 endptprime = -1; u32 endptcomplete = -1; TRACE_MSG6(pcd->TAG, "[%2d] USBCMD: %08x ENDPTPRIME: %08x COMPLETE: %08x STATUS: %08x %s", 2*epnum+dir, UOG_USBCMD, UOG_ENDPTPRIME, UOG_ENDPTCOMPLETE, (u32)dQH->size_ioc_int_sts, (dir == ARC_DIR_OUT) ? "OUT" : "IN"); if (urb && urb->buffer) { TRACE_MSG4(pcd->TAG, "buffer: %x length: %d alloc: %d dir: %d ", urb->buffer, urb->actual_length, urb->buffer_length, dir); /* flush cache for IN */ if ((dir == ARC_DIR_IN) && urb->actual_length) dma_cache_maint(urb->buffer, urb->actual_length, DMA_TO_DEVICE); /* invalidate cache for OUT */ else if ((dir == ARC_DIR_OUT) && urb->buffer_length) dma_cache_maint(urb->buffer, urb->alloc_length, DMA_FROM_DEVICE); } /* Set size and interrupt on each dtd, Clear reserved field, * set pointers and flush from cache, and save in cur_dqh for dtd_releases() */ memset(dtd, 0, sizeof(struct ep_td_struct)); dtd->size_ioc_sts = cpu_to_le32(((len << DTD_LENGTH_BIT_POS) | DTD_IOC | DTD_STATUS_ACTIVE)); dtd->size_ioc_sts &= cpu_to_le32(~DTD_RESERVED_FIELDS); dtd->buff_ptr0 = cpu_to_le32(endpoint->active_urb ? 
(u32) (virt_to_phys (endpoint->active_urb->buffer + offset)) : 0); dtd->next_td_ptr = cpu_to_le32(DTD_NEXT_TERMINATE); dtd->next_td_virt = NULL; dma_cache_maint(dtd, sizeof(struct ep_td_struct), DMA_TO_DEVICE); privdata->cur_dqh = dQH; /* Case 1 - Step 1 - Write dQH next pointer and dQH terminate bit to 0 as single DWord */ dQH->next_dtd_ptr = cpu_to_le32( virt_to_phys((void *)dtd) & EP_QUEUE_HEAD_NEXT_POINTER_MASK); /* Case 1 - Step 2 - Clear active and halt bit */ dQH->size_ioc_int_sts &= le32_to_cpu(~(EP_QUEUE_HEAD_STATUS_ACTIVE | EP_QUEUE_HEAD_STATUS_HALT)); dma_cache_maint(dQH, sizeof(struct ep_queue_head), DMA_TO_DEVICE); /* Case 1 - Step 3 - Prime endpoint by writing ENDPTPRIME */ mask = (dir == ARC_DIR_OUT) ? (1 << epnum) : (1 << (epnum + 16)); /* Verify that endpoint PRIME is not set, wait if necessary. */ for (timeout1 = 0; (UOG_ENDPTPRIME & mask) && (timeout1 ++ < 100); udelay(1)); /* ep0 needs extra tests */ UNLESS(epnum) { /* C.f. 39.16.3.2.2 Data Phase */ UOG_ENDPTPRIME |= mask; for (timeout2 = 0; timeout2++ < 100; ) { endptprime = UOG_ENDPTPRIME; // order may be important endptstat = UOG_ENDPTSTAT; // we check stat after prime BREAK_IF(endptstat & mask); BREAK_UNLESS(endptprime & mask); } if (!(endptstat & mask) && !(endptprime & mask)) { TRACE_MSG2(pcd->TAG, "[%2d] ENDPTSETUPSTAT: %04x PREMATURE FAILUURE", 2*epnum+dir, UOG_ENDPTSETUPSTAT); } TRACE_MSG6(pcd->TAG, "[%2d] ENDPTPRIME %08x ENPTSTAT: %08x mask: %08x timeout: %d:%d SET", 2*epnum+dir, UOG_ENDPTPRIME, UOG_ENDPTSTAT, mask, timeout1, timeout2);; } /* epn general case */ else {
/*!
 * zasevb_modinit() - linux module initialization
 *
 * This needs to initialize the hcd, pcd and tcd drivers. This includes tcd
 * and possibly hcd for some architectures.
 *
 * Sequence: enable the USB clock, create the otg instance, install the
 * ocd/pcd/tcd/hcd ops tables, run each driver's mod_init, then start the
 * OTG state machine with otg_init().
 *
 * NOTE(review): on the CATCH(error) path nothing is unwound
 * (zasevb_modexit() is commented out), so a partial init leaks - confirm.
 * NOTE(review): mxc_hrt_mod_init() is called here with three arguments
 * while a zero-argument variant exists elsewhere in this driver family -
 * presumably selected by build configuration; verify.
 */
static int zasevb_modinit (void)
{
    struct otg_instance *otg = NULL;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
    /* enable the USB clock; clk_put only drops the reference, the clock
     * stays enabled */
    struct clk *clk = clk_get(NULL, "usb_clk");
    clk_enable(clk);
    clk_put(clk);
#endif

    THROW_UNLESS((otg = otg_create()), error);

    mxc_pcd_ops_init();
#if !defined(OTG_C99)
    pcd_global_init();
    fs_ocd_global_init();
    zasevb_tcd_global_init();
    fs_pcd_global_init();
#endif /* !defined(OTG_C99) */

    ZAS = otg_trace_obtain_tag(otg, "zas");
    mxc_procfs_init();

    TRACE_MSG0(ZAS, "1. ZAS");

#if 0
    /* ZAS EVB Platform setup */
    TRACE_MSG4(ZAS, "BCTRL Version: %04x Status: %04x 1: %04x 2: %04x",
            readw(PBC_BASE_ADDRESS ),
            readw(PBC_BASE_ADDRESS + PBC_BSTAT),
            readw(PBC_BASE_ADDRESS + PBC_BCTRL1_SET),
            readw(PBC_BASE_ADDRESS + PBC_BCTRL2_SET));
#endif

    /* ZAS EVB Clock setup */
#if defined(CONFIG_ARCH_ARGONPLUS) || defined(CONFIG_ARCH_ARGONLV)
#define ZASEVB_MULTIPLIER       12
#define ZASEVB_DIVISOR          775     // ~10.
#else
#define ZASEVB_MULTIPLIER       12
#define ZASEVB_DIVISOR          155
#endif

    TRACE_MSG0(ZAS, "2. Setup GPT");
    THROW_UNLESS(ocd_instance = otg_set_ocd_ops(otg, &ocd_ops), error);
    REMOVE_OCD = ocd_instance->TAG;
    // XXX THROW_IF((ocd_ops.mod_init ? ocd_ops.mod_init() : 0), error);
#if defined(CONFIG_OTG_GPTR)
    mxc_gptcr_mod_init(ZASEVB_DIVISOR, ZASEVB_MULTIPLIER);
#endif /* defined(CONFIG_OTG_GPTR) */
#if defined(CONFIG_OTG_HRT)
    mxc_hrt_mod_init(otg, ZASEVB_DIVISOR, ZASEVB_MULTIPLIER);
#endif /* defined(CONFIG_OTG_HRT) */

#if !defined(CONFIG_USB_HOST)
    TRACE_MSG0(ZAS, "3. PCD");
    THROW_UNLESS(REMOVE_pcd_instance = otg_set_pcd_ops(otg, &pcd_ops), error);
    REMOVE_PCD = REMOVE_pcd_instance->TAG;
    // XXX THROW_IF((pcd_ops.mod_init ? pcd_ops.mod_init() : 0), error);
#else /* !defined(CONFIG_USB_HOST) */
    printk(KERN_INFO"%s: PCD DRIVER N/A\n", __FUNCTION__);
#endif /* !defined(CONFIG_USB_HOST) */

    TRACE_MSG0(ZAS, "4. TCD");
    THROW_UNLESS(REMOVE_tcd_instance = otg_set_tcd_ops(otg, &tcd_ops), error);
    REMOVE_TCD = REMOVE_tcd_instance->TAG;
    // XXX THROW_IF((tcd_ops.mod_init ? tcd_ops.mod_init() : 0), error);

#ifdef OTG_USE_I2C
    TRACE_MSG0(ZAS, "0. I2C");
    i2c_mod_init(otg);
#endif

#if defined(CONFIG_OTG_USB_HOST) || defined(CONFIG_OTG_USB_PERIPHERAL_OR_HOST)|| defined(CONFIG_OTG_DEVICE)
    TRACE_MSG0(ZAS, "5. Host");
    THROW_UNLESS(hcd_instance = otg_set_hcd_ops(otg, &hcd_ops), error);
    HCD = hcd_instance->TAG;
    // XXX THROW_IF((hcd_ops.mod_init) ? hcd_ops.mod_init() : 0, error);
#else /* defined(CONFIG_OTG_USB_HOST) || defined(CONFIG_OTG_USB_PERIPHERAL_OR_HOST)|| defined(CONFIG_OTG_DEVICE) */
    printk(KERN_INFO"%s: HCD DRIVER N/A\n", __FUNCTION__);
#endif /* defined(CONFIG_OTG_USB_HOST) || defined(CONFIG_OTG_USB_PERIPHERAL_OR_HOST)|| defined(CONFIG_OTG_DEVICE) */

    /* run each installed driver's mod_init; any failure aborts */
    TRACE_MSG0(ZAS, "6. Init & check");
    THROW_IF((ocd_ops.mod_init ? ocd_ops.mod_init(otg) : 0), error);
#if !defined(CONFIG_USB_HOST)
    THROW_IF((pcd_ops.mod_init ? pcd_ops.mod_init(otg) : 0), error);
#endif /* !defined(CONFIG_USB_HOST) */
    THROW_IF((tcd_ops.mod_init ? tcd_ops.mod_init(otg) : 0), error);
#if defined(CONFIG_OTG_USB_HOST) || defined(CONFIG_OTG_USB_PERIPHERAL_OR_HOST)|| defined(CONFIG_OTG_DEVICE)
    THROW_IF((hcd_ops.mod_init) ? hcd_ops.mod_init(otg) : 0, error);
#endif /* defined(CONFIG_OTG_USB_HOST) || defined(CONFIG_OTG_USB_PERIPHERAL_OR_HOST)|| defined(CONFIG_OTG_DEVICE) */

    THROW_UNLESS(ocd_instance && (otg = ocd_instance->otg), error);

    TRACE_MSG0(ZAS, "7. otg_init");
    if (MODPARM(serial_number_str) && strlen(MODPARM(serial_number_str))) {
        TRACE_MSG1(ZAS, "serial_number_str: %s", MODPARM(serial_number_str));
        otg_serial_number (otg, MODPARM(serial_number_str));
    }
    otg_init(otg);

    return 0;

    CATCH(error) {
        //zasevb_modexit();
        return -EINVAL;
    }
    return 0;
}