/*!
 * otg_pci_isr() - interrupt service handler
 *
 * Shared-IRQ handler for the OTG PCI device.  Offers the interrupt to each
 * registered sub-driver (starting at OTG_DRIVER_TCD) via its isr() hook;
 * the first hook that reports it handled the interrupt wins.  Falls back
 * to the pci driver's own isr() hook, and finally reports IRQ_NONE so the
 * kernel can try other handlers sharing this line.
 *
 * @param irq  interrupt number (unused here)
 * @param data dev_id passed to request_irq(); the struct pci_dev
 * @param r    saved registers (legacy 3-argument handler signature)
 */
irqreturn_t otg_pci_isr(int irq, void *data, struct pt_regs *r)
{
        struct pci_dev *pci_dev = data;
        struct otg_dev *otg_dev = pci_get_drvdata(pci_dev);
        int i;

        /* XXX spinlock */
        /* Nothing to do if probe has not attached our driver data yet. */
        RETURN_IRQ_HANDLED_UNLESS(otg_dev);
        TRACE_MSG0(otg_dev->PCI, "---------------------------------------- Start");

        /* Offer the interrupt to each registered sub-driver in turn. */
        for (i = OTG_DRIVER_TCD; i < OTG_DRIVER_TYPES; i++) {
                struct otg_driver *otg_driver;
                otg_driver = otg_dev->otg_pci_driver->drivers[i];
                CONTINUE_UNLESS(otg_driver);
                TRACE_MSG2(otg_dev->PCI, "try %s %d", otg_driver->name, i);
                CONTINUE_UNLESS(otg_driver->isr);
                /* First sub-driver that handles the interrupt terminates the search. */
                RETURN_IRQ_HANDLED_IF_IRQ_HANDLED (otg_driver->isr(otg_dev, data));
                TRACE_MSG2(otg_dev->PCI, "not handled by %s %d", otg_driver->name, i);
        }

        /* No sub-driver claimed it; try the pci driver's own hook. */
        TRACE_MSG2(otg_dev->PCI, "try %s %d", otg_dev->otg_pci_driver->name, i);
        if (otg_dev->otg_pci_driver->isr)
                RETURN_IRQ_HANDLED_IF_IRQ_HANDLED (otg_dev->otg_pci_driver->isr(otg_dev, data));

        /* XXX spinlock */
        TRACE_MSG0(otg_dev->PCI, "---------------------------------------- IRQ_NONE ----");
        return IRQ_NONE;
}
/*!
 * mouse_cf_modinit() - module init
 *
 * This is called by the Linux kernel; either when the module is loaded
 * if compiled as a module, or during the system intialization if the
 * driver is linked into the kernel.
 *
 * This function will parse module parameters if required and then register
 * the mouse driver with the USB Device software.
 *
 * @return 0 on success, -EINVAL if the composite function cannot be registered.
 */
static int mouse_cf_modinit (void)
{
        int i;          /* NOTE(review): unused */
        printk (KERN_INFO "%s: vendor_id: %04x product_id: %04x\n", __FUNCTION__, vendor_id, product_id);
#if !defined(OTG_C99)
        /* Pre-C99 builds cannot use designated initializers, so the global
         * and ops structures are filled in at run time instead. */
        mouse_cf_global_init();
        mouse_cf_ops_init();
#endif /* defined(OTG_C99) */
        /* Obtain a trace tag for this driver's TRACE_MSG output. */
        MOUSE = otg_trace_obtain_tag();
        TRACE_MSG2(MOUSE, "vendor_id: %04x product_id: %04x",vendor_id, product_id);
        //if (vendor_id)
        //        mouse_composite_driver.idVendor = cpu_to_le16(vendor_id);
        //if (product_id)
        //        mouse_composite_driver.idProduct = cpu_to_le16(product_id);
        // register as usb function driver
        TRACE_MSG0(MOUSE, "REGISTER COMPOSITE");
        THROW_IF (usbd_register_composite_function (&mouse_composite_driver, "mouse-random-cf", NULL,
                        mouse_arg_list, NULL), error);
        TRACE_MSG0(MOUSE, "REGISTER FINISHED");
        /* Error path: release the trace tag and fail the module load. */
        CATCH(error) {
                otg_trace_invalidate_tag(MOUSE);
                return -EINVAL;
        }
        return 0;
}
/*! * blan_fd_set_configuration - called to indicate set configuration request was received * @param function_instance * @param configuration * @return int */ int blan_fd_set_configuration (struct usbd_function_instance *function_instance, int configuration) { struct usb_network_private *npd = function_instance->privdata; int hs = usbd_high_speed(function_instance); //struct usbd_interface_instance *interface_instance = (struct usbd_interface_instance *)function_instance; TRACE_MSG2(NTT, "CONFIGURED: %d ip_addr: %08x", configuration, npd->ip_addr); //net_check_mesg(mesg_configured); if (npd->eem_os_buffer) net_os_dealloc_buffer(function_instance, npd->eem_os_data, npd->eem_os_buffer); npd->max_recv_urbs = hs ? NETWORK_START_URBS * 3 : NETWORK_START_URBS; otg_atomic_clr(&npd->recv_urbs_started[0]); #ifdef CONFIG_OTG_NETWORK_DOUBLE_OUT otg_atomic_clr(&npd->recv_urbs_started[1]); #endif /* CONFIG_OTG_NETWORK_DOUBLE_OUT */ npd->eem_os_data = npd->eem_os_buffer = NULL; //npd->flags |= NETWORK_CONFIGURED; npd->altsetting = 0; if ((npd->flags & NETWORK_OPEN)) net_os_send_notification_later(function_instance); TRACE_MSG1(NTT, "START RECV npd->flags: %04x", npd->flags); net_fd_start(function_instance); TRACE_MSG1(NTT, "CONFIGURED npd->flags: %04x", npd->flags); return 0; }
/*!
 * pmic_idi_handler() - PMIC ID-pin interrupt handler
 *
 * Called on an ID-pin change event from the PMIC; wakes the OTG state
 * machine so it can re-sample the transceiver state.
 *
 * A previous implementation sampled the MC13783 sense bits here and fed
 * ID_GND/ID_FLOAT events directly to otg_event(); that path had been
 * disabled behind "#if 0" and has now been removed — pmic_otg_wakeup()
 * owns the re-sampling.
 */
void pmic_idi_handler(void)
{
        TRACE_MSG0(REMOVE_TCD, "--");
        pmic_otg_wakeup();
}
/*! blan_fd_urb_received_ep0 - callback for sent URB
 *
 * Handles notification that an urb has been sent (successfully or otherwise).
 *
 * @param urb completed endpoint-zero urb
 * @param urb_rc completion code (unused; urb->status is checked instead)
 * @return non-zero for failure.  Always returns -EINVAL so the caller
 *         de-allocates the urb regardless of status.
 */
int blan_fd_urb_received_ep0 (struct usbd_urb *urb, int urb_rc)
{
        TRACE_MSG2(NTT,"urb: %p status: %d", urb, urb->status);
        /* Bail out early on a failed transfer. */
        RETURN_EINVAL_IF (USBD_URB_OK != urb->status);
        // TRACE_MSG1(NTT,"%s", urb->buffer);   // QQSV is this really a NUL-terminated string???
        return -EINVAL;                         // caller will de-allocate
}
/*! ecm_fd_set_interface
 *
 * @brief called to indicate set interface request was received
 *
 * Records the new altsetting and starts or stops the network function:
 * a non-zero altsetting starts it, altsetting zero stops it.
 *
 * @param function_instance
 * @param wIndex
 * @param altsetting
 * @return int
 */
int ecm_fd_set_interface (struct usbd_function_instance *function_instance, int wIndex, int altsetting)
{
        struct usb_network_private *npd = function_instance->privdata;

        TRACE_MSG2(NTT, "SET INTERFACE[%02x] altsetting: %02x", wIndex, altsetting);
        npd->altsetting = altsetting;
        TRACE_MSG3(NTT, "ipaddr: %08x flags: %04x altsetting: %02x",
                        npd->ip_addr, npd->flags, npd->altsetting);

        if (altsetting)
                return net_fd_start (function_instance);
        return net_fd_stop (function_instance);
}
/*! net_fd_start_xmit_nocrc
 * @brief - start sending a buffer (no CRC appended)
 *
 * Allocates a bulk IN urb, copies the frame into it, charges the queued
 * byte/urb accounting and starts the transfer.  On double-IN
 * configurations the urb is queued on the endpoint with the fewest
 * outstanding transmit urbs.
 *
 * Changes from previous revision: removed the duplicated
 * "urb->actual_length = len" assignment and the unused locals cp/crc
 * (left over from the CRC-appending variant).
 *
 * @param function_instance - pointer to this function instance
 * @param buffer buffer containing data to send
 * @param len - length of data to send
 * @param data instance data (saved in urb->function_privdata, handed back on completion)
 * @return: 0 if all OK
 *          -ENOMEM if the urb cannot be allocated
 *          rc from usbd_start_in_urb() if that fails (is != 0, may be one of err values above)
 * Note: -ECOMM is interpreted by calling routine as signal to leave IF stopped.
 */
int net_fd_start_xmit_nocrc (struct usbd_function_instance *function_instance, u8 *buffer, int len, void *data)
{
        struct usb_network_private *npd = function_instance->privdata;
        struct usbd_urb *urb = NULL;
        int rc;
        int in_pkt_sz;
#ifndef CONFIG_OTG_NETWORK_DOUBLE_IN
        int xmit_index = 0;
        int endpoint_index = BULK_IN_A;
#else /* CONFIG_OTG_NETWORK_DOUBLE_IN */
        /* Pick the IN endpoint with the fewest urbs in flight. */
        int xmit_index = (otg_atomic_read(&npd->xmit_urbs_started[0]) <=
                        otg_atomic_read(&npd->xmit_urbs_started[1])) ? 0 : 1;
        int endpoint_index = (xmit_index) ? BULK_IN_B : BULK_IN_A;
#endif /* CONFIG_OTG_NETWORK_DOUBLE_IN */

        TRACE_MSG7(NTT,"npd: %p function: %p flags: %04x len: %d endpoint_index: %d xmit_started: %d %d",
                        npd, function_instance, npd->flags, len, endpoint_index,
                        otg_atomic_read(&npd->xmit_urbs_started[0]),
                        otg_atomic_read(&npd->xmit_urbs_started[1]));

        in_pkt_sz = usbd_endpoint_wMaxPacketSize(function_instance, endpoint_index,
                        usbd_high_speed(function_instance));

        /* allocate urb 5 bytes larger than required */
        if (!(urb = usbd_alloc_urb (function_instance, endpoint_index, len + 5 + in_pkt_sz,
                                        net_fd_urb_sent_bulk ))) {
                u8 epa = usbd_endpoint_bEndpointAddress(function_instance, endpoint_index,
                                usbd_high_speed(function_instance));
                TRACE_MSG2(NTT,"urb alloc failed len: %d endpoint: %02x", len, epa);
                printk(KERN_ERR"%s: urb alloc failed len: %d endpoint: %02x\n", __FUNCTION__, len, epa);
                return -ENOMEM;
        }

        urb->actual_length = len;
        memcpy (urb->buffer, buffer, len);
        urb->function_privdata = data;

        /* Charge accounting before starting; unwind it on failure. */
        otg_atomic_add(urb->actual_length, &npd->queued_bytes);
        otg_atomic_inc(&npd->xmit_urbs_started[xmit_index]);
        if ((rc = usbd_start_in_urb (urb))) {
                TRACE_MSG1(NTT,"FAILED: %d", rc);
                printk(KERN_ERR"%s: FAILED: %d\n", __FUNCTION__, rc);
                urb->function_privdata = NULL;
                otg_atomic_sub(urb->actual_length, &npd->queued_bytes);
                otg_atomic_dec(&npd->xmit_urbs_started[xmit_index]);
                usbd_free_urb (urb);
                return rc;
        }
        return 0;
}
/*!
 * mxc_hrt_start_timer() - start a timer for otg state machine
 * Set or reset timer to interrupt in number of uS (micro-seconds).
 *
 * XXX There may be a floor or minimum that can be effectively set.
 * XXX We have seen an occasional problem with US(25) for discharge for example.
 *
 * @param otg
 * @param usec interval in micro-seconds; 0 cancels any pending timer
 * @return always 0
 */
int mxc_hrt_start_timer(struct otg_instance *otg, int usec)
{
        TRACE_MSG1(OCD, "usec: %d", usec);
        mxc_hr_usec_set = usec;
        //TRACE_MSG1 (OCD, "usec: %d", usec);
        /* Always cancel any pending timer before (possibly) re-arming. */
        mxc_hr_active = FALSE;
        TRACE_MSG1(OCD, "resetting active: %d", mxc_hr_active);
        del_timer(&hr_timer);
        /* A zero interval means "stop the timer" — nothing more to do. */
        RETURN_ZERO_UNLESS(usec);
        mxc_hr_active = TRUE;
        TRACE_MSG1(OCD, "setting active: %d", mxc_hr_active);
        if (mxc_hr_usec_set >= 1000000) {
                /* Long delay (>= 1 s): whole seconds expressed in jiffies. */
                hr_timer.expires = jiffies + ((mxc_hr_usec_set/1000000)*mxc_hr_jiffy_per_sec);
                hr_timer.arch_cycle_expires = get_arch_cycles(jiffies);
                TRACE_MSG4 (OCD, "usec: %u jiffies: %8u expires: %8u arch_cycle_expires: %8u LONG",
                                usec, jiffies, hr_timer.expires, hr_timer.arch_cycle_expires);
        }
        else {
                /* Short delay: sub-jiffy resolution via arch cycles. */
                hr_timer.expires = jiffies;
                hr_timer.arch_cycle_expires = get_arch_cycles(jiffies);
                if (mxc_hr_usec_set < 100) {
                        /* Enforce a 100 us floor (see XXX notes above). */
                        TRACE_MSG1(OCD, "usec: %d set to minimum 100", mxc_hr_usec_set);
                        mxc_hr_usec_set = 100;
                }
                hr_timer.arch_cycle_expires += nsec_to_arch_cycle(mxc_hr_usec_set * 1000);
                TRACE_MSG2(OCD, "arch_cycle_expires: %d arch_cycles_per_jiffy: %d",
                                hr_timer.arch_cycle_expires, arch_cycles_per_jiffy);
                /* Carry overflowed arch cycles into whole jiffies. */
                while (hr_timer.arch_cycle_expires >= arch_cycles_per_jiffy) {
                        hr_timer.expires++;
                        hr_timer.arch_cycle_expires -= arch_cycles_per_jiffy;
                }
                TRACE_MSG4 (OCD, "usec: %u jiffies: %8u expires: %8u arch_cycle_expires: %8u SHORT",
                                usec, jiffies, hr_timer.expires, hr_timer.arch_cycle_expires);
        }
        add_timer(&hr_timer);
        return 0;
}
/*!
 * mxc_gptcr_start_timer() - start a timer for otg state machine
 * Set or reset timer to interrupt in number of uS (micro-seconds).
 *
 * XXX There may be a floor or minimum that can be effectively set.
 * XXX We have seen an occasional problem with US(25) for discharge for example.
 *
 * @param otg
 * @param usec interval in micro-seconds; 0 leaves the compare channel disabled
 * @return always 0
 */
int mxc_gptcr_start_timer(struct otg_instance *otg, int usec)
{
        u32 ticks;
        u32 match;
        unsigned long flags;

        /* GPT compare registers are shared state; program them with irqs off. */
        local_irq_save (flags);

        //TRACE_MSG2(OCD, "usec: %d CNT: %08x", usec, *_reg_GPT_GPTCNT);
        /* Enforce a 100 us floor on non-zero intervals. */
        if (usec && (usec < 100)) {
                usec = 100;
                TRACE_MSG2(OCD, "usec: %d CNT: %08x", usec, *_reg_GPT_GPTCNT);
        }

        /*
         * Disable Channel 3 compare.
         */
        *_reg_GPT_GPTCR &= ~(0x7 << 26);

        mxc_gptcr_usec_set = usec;
        if (usec) {
                mxc_gptcr_ticks_set = ticks = (u32 ) mxc_gptcr_ticks(usec);
                mxc_gptcr_match_set = 0;
                mxc_gptcr_active = 0;

                /*
                 * Compute and set match register
                 */
                mxc_gptcr_match_set = match = *_reg_GPT_GPTCNT + ticks;
                *_reg_GPT_GPTOCR3 = match;
                mxc_gptcr_active = 1;
                TRACE_MSG6(OCD, "cnt: %08x match: %08x GPTCNT: %08x GPTCR: %08x GPTSR: %08x GPTOCR3: %08x\n",
                                mxc_gptcr_ticks_set, mxc_gptcr_match_set, *_reg_GPT_GPTCNT,
                                *_reg_GPT_GPTCR, *_reg_GPT_GPTSR, *_reg_GPT_GPTOCR3);

                /*
                 * Enable interrupt
                 */
                *_reg_GPT_GPTIR |= (0x01 << 2);
        }
        local_irq_restore (flags);
        return 0;
}
/*! net_fd_recv_urb_mdlm
 * @brief callback to process a received URB
 *
 * Verifies the optional trailing CRC (depending on configuration) and
 * hands the frame to net_fd_recv_buffer().  CRC failures only become
 * fatal once a good CRC has been seen from this host (backwards
 * compatibility with hosts that do not append a CRC).
 *
 * @param urb - pointer to received urb
 * @param rc - dummy parameter
 * @return non-zero for failure.
 */
int net_fd_recv_urb_mdlm(struct usbd_urb *urb, int rc)
{
        struct usbd_function_instance *function_instance = urb->function_instance;
        struct usb_network_private *npd = function_instance->privdata;
        void *os_data = NULL;
        void *os_buffer = NULL;
        int crc_bad = 0;
        int trim = 0;
        int len;
        u32 crc;
        u32 temmp;      /* NOTE(review): unused (apparently a typo for "temp") */

        len = urb->actual_length;
        trim = 0;

        //TRACE_MSG2(NTT, "status: %d actual_length: %d", urb->status, urb->actual_length);
        //RETURN_EINVAL_IF (urb->status == USBD_URB_OK);

        /* OS-level buffer handle attached to the urb when it was queued. */
        os_data = urb->function_privdata;
        //TRACE_MSG2(NTT, "os_data: %x os_buffer: %x", os_data, os_buffer);

#if defined(CONFIG_OTG_NETWORK_BLAN_PADAFTER)
        {
                /* This version simply checks for a correct CRC along the
                 * entire packet. Some UDC's have trouble with some packet
                 * sizes, this allows us to add pad bytes after the CRC.
                 */
                /* NOTE(review): os_buffer is still NULL here and declared
                 * void *, so the (*os_buffer++ = *src++) copies below look
                 * broken if this config option is enabled — confirm whether
                 * an alloc (c.f. net_fd_recv_urb_ecm) is missing. */
                u8 *src = urb->buffer;
                int copied;

                // XXX this should work, but the MIPS optimizer seems to get it wrong....
                //copied = (len < urb->wMaxPacketSize) ? 0 : ((len / urb->wMaxPacketSize) - 1) * urb->wMaxPacketSize;

                if (len < urb->wMaxPacketSize*2)
                        copied = 0;
                else {
                        int pkts = ((len - urb->wMaxPacketSize) / urb->wMaxPacketSize);
                        copied = (pkts - 1) * urb->wMaxPacketSize;
                }

                len -= copied;
                crc = CRC32_INIT;
                /* CRC the whole-packet prefix in bulk, then continue byte at
                 * a time until the running CRC matches (end of real data). */
                for (; copied-- > 0 ; crc = COMPUTE_FCS (crc, *os_buffer++ = *src++));
                for (; (len-- > 0) && (CRC32_GOOD != crc); crc = COMPUTE_FCS (crc, *os_buffer++ = *src++));
                trim = len + 4;
                if (CRC32_GOOD != crc) {
                        TRACE_MSG1(NTT,"AAA frame: %03x", urb->framenum);
                        THROW_IF(npd->seen_crc, crc_error);
                }
                else
                        npd->seen_crc = 1;
        }
        //#else /* defined(CONFIG_OTG_NETWORK_BLAN_PADAFTER) */
#elif defined(CONFIG_OTG_NETWORK_BLAN_CRC) || defined(CONFIG_OTG_NETWORK_SAFE_CRC)
        /*
         * The CRC can be sent in two ways when the size of the transfer
         * ends up being a multiple of the packetsize:
         *
         *                                           |
         *                <data> <CRC><CRC><CRC><CRC>|<???>          case 1
         *           <data> <NUL><CRC><CRC><CRC>|<CRC>               case 2
         *           <data> <NUL><CRC><CRC><CRC><CRC>|               case 3
         *     <data> <NUL><CRC><CRC><CRC>|<CRC>     |               case 4
         *                                           |
         *
         * This complicates CRC checking, there are four scenarios:
         *
         *      1. length is 1 more than multiple of packetsize with a trailing byte
         *      2. length is 1 more than multiple of packetsize
         *      3. length is multiple of packetsize
         *      4. none of the above
         *
         * Finally, even though we always compute CRC, we do not actually throw
         * things away until and unless we have previously seen a good CRC.
         * This allows backwards compatibility with hosts that do not support
         * adding a CRC to the frame.
         *
         */
        // test if 1 more than packetsize multiple
        if (1 == (len % urb->wMaxPacketSize)) {
                u8 *cp = urb->buffer + len - 1 - 4;
                // copy and CRC up to the packetsize boundary
                crc = crc32_nocopy(urb->buffer, len - 1, CRC32_INIT);
                if (TRACE_VERBOSE)
                        TRACE_MSG7(NTT,"A CRC nocopy: %08x %08x len: %d CRC: %02x %02x %02x %02x",
                                        CRC32_GOOD, crc, len, cp[0], cp[1], cp[2], cp[3]);
                // if the CRC is good then this is case 1
                if (CRC32_GOOD != crc) {
                        /* Not case 1: fold in the trailing byte and re-test. */
                        crc = crc32_nocopy(urb->buffer + len - 1, 1, crc);
                        if (CRC32_GOOD != crc) {
                                //crc_errors[len%64]++;
                                TRACE_MSG3(NTT,"A CRC error %08x %08x %03x", CRC32_GOOD, crc, urb->framenum);
                                printk(KERN_INFO"%s: A CRC\n", __FUNCTION__);
                                npd->seen_crc_error = 1;
                                THROW_IF(npd->seen_crc, crc_error);
                        }
                        else npd->seen_crc = 1;
                }
                else npd->seen_crc = 1;
        }
        else {
                u8 *cp = urb->buffer + len - 4;
                crc = crc32_nocopy(urb->buffer, len, CRC32_INIT);
                if (TRACE_VERBOSE)
                        TRACE_MSG7(NTT,"B CRC nocopy: %08x %08x len: %d CRC: %02x %02x %02x %02x",
                                        CRC32_GOOD, crc, len, cp[0], cp[1], cp[2], cp[3]);
                if (CRC32_GOOD != crc) {
                        //crc_errors[len%64]++;
                        TRACE_MSG3(NTT,"B CRC error %08x %08x %03x", CRC32_GOOD, crc, urb->framenum);
                        if (TRACE_VERBOSE) {
                                TRACE_MSG2(NTT, "status: %d actual_length: %d",
                                                urb->status, urb->actual_length);
                                TRACE_NRECV(NTT, 32, urb->buffer);
                                TRACE_MSG0(NTT, "--");
                                TRACE_RECV(NTT, urb->actual_length, urb->buffer);
                        }
                        printk(KERN_INFO"%s: B CRC\n", __FUNCTION__);
                        npd->seen_crc_error = 1;
                        THROW_IF(npd->seen_crc, crc_error);
                }
                else npd->seen_crc = 1;
                // XXX shorten by 4 bytes?
        }

        // trim IFF we are paying attention to crc
        if (npd->seen_crc)
                trim = 4;
#endif /* defined(CONFIG_OTG_NETWORK_BLAN_CRC) ...*/

        /* Hand the frame up; on failure release the OS buffer ourselves. */
        if (net_fd_recv_buffer(function_instance, urb->buffer, len, os_data, crc_bad, trim)) {
                TRACE_MSG0(NTT, "FAILED");
                net_os_dealloc_buffer(function_instance, os_data, os_buffer);
        }

        // catch a simple error, just increment missed error and general error
        CATCH(error) {
                //TRACE_MSG4(NTT,"CATCH(error) urb: %p status: %d len: %d function: %p",
                //                urb, urb->status, urb->actual_length, function_instance);
                // catch a CRC error
                CATCH(crc_error) {
                        crc_bad = 1;
                        npd->seen_crc_error = 1;
                }
        }
        return 0;
}
/*! net_fd_start_xmit_mdlm
 * @brief - start sending a buffer
 *
 * Allocates a bulk IN urb for the frame, optionally appends a CRC32
 * (and a pad byte or ZLP flag to guarantee a short packet), charges the
 * queued byte/urb accounting and starts the transfer.
 *
 * BUG FIX: in_pkt_sz was computed with xmit_index instead of
 * endpoint_index; usbd_endpoint_wMaxPacketSize() takes an endpoint
 * index (c.f. net_fd_start_xmit_nocrc), so with DOUBLE_IN configured
 * the wrong packet size drove the ZLP/pad logic.
 *
 * @param function_instance - function instance pointer
 * @param buffer
 * @param len
 * @param data saved in urb->function_privdata, handed back on completion
 *
 * @return: 0 if all OK
 *          -EINVAL, -EUNATCH, -ENOMEM
 *          rc from usbd_start_in_urb() if that fails (is != 0, may be one of err values above)
 * Note: -ECOMM is interpreted by calling routine as signal to leave IF stopped.
 */
int net_fd_start_xmit_mdlm (struct usbd_function_instance *function_instance, u8 *buffer, int len, void *data)
{
        struct usb_network_private *npd = function_instance->privdata;
        struct usbd_urb *urb = NULL;
        int rc;
        u32 crc;
#ifndef CONFIG_OTG_NETWORK_DOUBLE_IN
        int xmit_index = 0;
        int endpoint_index = BULK_IN_A;
#else /* CONFIG_OTG_NETWORK_DOUBLE_IN */
        /* Pick the IN endpoint with the fewest urbs in flight. */
        int xmit_index = (otg_atomic_read(&npd->xmit_urbs_started[0]) <=
                        otg_atomic_read(&npd->xmit_urbs_started[1])) ? 0 : 1;
        int endpoint_index = (xmit_index) ? BULK_IN_B : BULK_IN_A;
#endif /* CONFIG_OTG_NETWORK_DOUBLE_IN */
        /* FIX: use endpoint_index (was xmit_index). */
        int in_pkt_sz = usbd_endpoint_wMaxPacketSize(function_instance, endpoint_index,
                        usbd_high_speed(function_instance));

        if (TRACE_VERBOSE)
                TRACE_MSG8(NTT,"npd: %p flags: %04x len: %d endpoint_index: %d "
                                "xmit_index: %d xmit_started: %d %d in_pkt_sz: %d",
                                npd, npd->flags, len, endpoint_index, xmit_index,
                                otg_atomic_read(&npd->xmit_urbs_started[0]),
                                otg_atomic_read(&npd->xmit_urbs_started[1]), in_pkt_sz);

#if defined(CONFIG_OTG_NETWORK_BLAN_CRC) || defined(CONFIG_OTG_NETWORK_SAFE_CRC)
        /* allocate urb 5 bytes larger than required */
        if (!(urb = usbd_alloc_urb (function_instance, endpoint_index, len + 5 + 4 + in_pkt_sz,
                                        net_fd_urb_sent_bulk ))) {
                u8 epa = usbd_endpoint_bEndpointAddress(function_instance, endpoint_index,
                                usbd_high_speed(function_instance));
                TRACE_MSG2(NTT,"urb alloc failed len: %d endpoint: %02x", len, epa);
                return -ENOMEM;
        }
        urb->actual_length = len;

        /* copy and crc len bytes */
        crc = crc32_copy(urb->buffer, buffer, len, CRC32_INIT);

        /* If data + CRC would exactly fill the last packet, force a short
         * packet — either a pad byte or a ZLP. */
        if ((urb->actual_length % in_pkt_sz) == (in_pkt_sz - 4)) {
                /* no longer in Kconfig - change undef to define to active padbyte */
#undef CONFIG_OTG_NETWORK_PADBYTE
#ifdef CONFIG_OTG_NETWORK_PADBYTE
                // add a pad byte if required to ensure a short packet, usbdnet driver
                // will correctly handle pad byte before or after CRC, but the MCCI driver
                // wants it before the CRC.
                crc = crc32_pad(urb->buffer + urb->actual_length, 1, crc);
                urb->actual_length++;
#else /* CONFIG_OTG_NETWORK_PADBYTE */
                urb->flags |= USBD_URB_SENDZLP;
                TRACE_MSG2(NTT,"setting ZLP: urb: %p flags: %x", urb, urb->flags);
#endif /* CONFIG_OTG_NETWORK_PADBYTE */
        }

        /* Append the (inverted) CRC, little-endian, byte at a time. */
        crc = ~crc;
        urb->buffer[urb->actual_length++] = crc & 0xff;
        urb->buffer[urb->actual_length++] = (crc >> 8) & 0xff;
        urb->buffer[urb->actual_length++] = (crc >> 16) & 0xff;
        urb->buffer[urb->actual_length++] = (crc >> 24) & 0xff;

#if defined(CONFIG_OTG_NETWORK_BLAN_FERMAT)
        if (npd->fermat) fermat_encode(urb->buffer, urb->actual_length);
#endif
#else /* defined(CONFIG_OTG_NETWORK_BLAN_CRC) ...*/
        /* allocate urb with no buffer */
#ifdef CONFIG_OTG_NETWORK_XMIT_OS
        /* Zero-copy: the urb borrows the caller's buffer. */
        if (!(urb = usbd_alloc_urb (function_instance, endpoint_index, 0, net_fd_urb_sent_bulk ))) {
                u8 epa = usbd_endpoint_bEndpointAddress(function_instance, endpoint_index,
                                usbd_high_speed(function_instance));
                TRACE_MSG2(NTT,"urb alloc failed len: %d endpoint: %02x", len, epa);
                return -ENOMEM;
        }
        urb->actual_length = len;
        urb->buffer = buffer;
#else /* CONFIG_OTG_NETWORK_XMIT_OS */
        if (!(urb = usbd_alloc_urb (function_instance, endpoint_index, len + 5 + 4 + in_pkt_sz,
                                        net_fd_urb_sent_bulk ))) {
                u8 epa = usbd_endpoint_bEndpointAddress(function_instance, endpoint_index,
                                usbd_high_speed(function_instance));
                TRACE_MSG2(NTT,"urb alloc failed len: %d endpoint: %02x", len, epa);
                printk(KERN_ERR"%s: urb alloc failed len: %d endpoint: %02x\n", __FUNCTION__, len, epa);
                return -ENOMEM;
        }
        urb->actual_length = len;
        memcpy (urb->buffer, buffer, len);
#endif /* CONFIG_OTG_NETWORK_XMIT_OS */
        /* Exact multiple of the packet size needs a ZLP terminator. */
        urb->flags |= ((urb->actual_length % in_pkt_sz) == 0) ? USBD_URB_SENDZLP : 0;
#endif /* defined(CONFIG_OTG_NETWORK_BLAN_CRC) ...*/

        if (TRACE_VERBOSE)
                TRACE_MSG3(NTT,"urb: %p buf: %p priv: %p", urb, data, urb->function_privdata);

        urb->function_privdata = data;

        /* Charge accounting before starting; unwind it on failure. */
        otg_atomic_add(urb->actual_length, &npd->queued_bytes);
        otg_atomic_inc(&npd->xmit_urbs_started[xmit_index]);
        if ((rc = usbd_start_in_urb (urb))) {
                TRACE_MSG1(NTT,"FAILED: %d", rc);
                printk(KERN_ERR"%s: FAILED: %d\n", __FUNCTION__, rc);
                urb->function_privdata = NULL;
                otg_atomic_sub(urb->actual_length, &npd->queued_bytes);
                otg_atomic_dec(&npd->xmit_urbs_started[xmit_index]);
                usbd_free_urb (urb);
                return rc;
        }
        return 0;
}
/*! net_fd_recv_urb_ecm
 * @brief - callback to process a received URB
 *
 * Copies the received frame into a freshly allocated OS buffer,
 * verifying the optional trailing CRC as it copies (when CRC support is
 * configured), then hands the frame to net_fd_recv_buffer().
 *
 * @param urb - received urb
 * @param rc dummy
 * @return non-zero for failure.
 */
int net_fd_recv_urb_ecm(struct usbd_urb *urb, int rc)
{
        struct usbd_function_instance *function_instance = urb->function_instance;
        struct usb_network_private *npd = function_instance->privdata;
        void *os_data = NULL;
        u8 *os_buffer, *net_frame;
        int crc_bad = 0;
        int trim = 0;
        int len;
        u32 crc;
#ifndef CONFIG_OTG_NETWORK_DOUBLE_OUT
        int endpoint_index = BULK_OUT_A;
#else /* CONFIG_OTG_NETWORK_DOUBLE_OUT */
        /* NOTE(review): identical to the single-OUT case — intentional? */
        int endpoint_index = BULK_OUT_A;
#endif /* CONFIG_OTG_NETWORK_DOUBLE_OUT */
        int out_pkt_sz = usbd_endpoint_wMaxPacketSize(function_instance, endpoint_index,
                        usbd_high_speed(function_instance));

        len = urb->actual_length;
        trim = 0;

        TRACE_MSG2(NTT, "status: %d actual_length: %d", urb->status, urb->actual_length);
        //RETURN_EINVAL_IF (urb->status == USBD_URB_OK);

        /* Allocate an OS buffer big enough for the raw frame; net_frame
         * remembers its start while os_buffer advances during the copy. */
        THROW_UNLESS((os_data = net_os_alloc_buffer(function_instance, &os_buffer, len)), error);
        net_frame = os_buffer;
        //TRACE_MSG2(NTT, "os_data: %x os_buffer: %x", os_data, os_buffer);

        /*
         * The CRC can be sent in two ways when the size of the transfer
         * ends up being a multiple of the packetsize:
         *
         *                                           |
         *                <data> <CRC><CRC><CRC><CRC>|<???>          case 1
         *           <data> <NUL><CRC><CRC><CRC>|<CRC>               case 2
         *           <data> <NUL><CRC><CRC><CRC><CRC>|               case 3
         *     <data> <NUL><CRC><CRC><CRC>|<CRC>     |               case 4
         *                                           |
         *
         * This complicates CRC checking, there are four scenarios:
         *
         *      1. length is 1 more than multiple of packetsize with a trailing byte
         *      2. length is 1 more than multiple of packetsize
         *      3. length is multiple of packetsize
         *      4. none of the above
         *
         * Finally, even though we always compute CRC, we do not actually throw
         * things away until and unless we have previously seen a good CRC.
         * This allows backwards compatibility with hosts that do not support
         * adding a CRC to the frame.
         *
         */
        // test if 1 more than packetsize multiple
        if (1 == (len % out_pkt_sz)) {
#if defined(CONFIG_OTG_NETWORK_BLAN_CRC) || defined(CONFIG_OTG_NETWORK_SAFE_CRC)
                // copy and CRC up to the packetsize boundary
                crc = crc32_copy(os_buffer, urb->buffer, len - 1, CRC32_INIT);
                os_buffer += len - 1;
                // if the CRC is good then this is case 1
                if (CRC32_GOOD != crc) {
                        /* Not case 1: fold in the trailing byte and re-test. */
                        crc = crc32_copy(os_buffer, urb->buffer + len - 1, 1, crc);
                        os_buffer += 1;
                        if (CRC32_GOOD != crc) {
                                //crc_errors[len%64]++;
                                TRACE_MSG2(NTT,"A CRC error %08x %03x", crc, urb->framenum);
                                THROW_IF(npd->seen_crc, crc_error);
                        }
                        else npd->seen_crc = 1;
                }
                else npd->seen_crc = 1;
#endif /* defined(CONFIG_OTG_NETWORK_BLAN_CRC) ...*/
        }
        else {
#if defined(CONFIG_OTG_NETWORK_BLAN_CRC) || defined(CONFIG_OTG_NETWORK_SAFE_CRC)
                crc = crc32_copy(os_buffer, urb->buffer, len, CRC32_INIT);
                os_buffer += len;
                if (CRC32_GOOD != crc) {
                        //crc_errors[len%64]++;
                        TRACE_MSG2(NTT,"B CRC error %08x %03x", crc, urb->framenum);
                        THROW_IF(npd->seen_crc, crc_error);
                }
                else npd->seen_crc = 1;
#else /* !defined(CONFIG_OTG_NETWORK_BLAN_CRC) ...*/
                /* No CRC support configured: plain copy. */
                memcpy (os_buffer, urb->buffer, len);
#endif /* !defined(CONFIG_OTG_NETWORK_BLAN_CRC) ...*/
        }

        // trim IFF we are paying attention to crc
#if defined(CONFIG_OTG_NETWORK_BLAN_CRC) || defined(CONFIG_OTG_NETWORK_SAFE_CRC)
        if (npd->seen_crc)
                trim = 4;
#endif /* defined(CONFIG_OTG_NETWORK_BLAN_CRC) ...*/

        /* Hand the frame up; on failure release the OS buffer ourselves. */
        if (net_fd_recv_buffer(function_instance, net_frame, len, os_data, crc_bad, trim)) {
                TRACE_MSG0(NTT, "FAILED");
                net_os_dealloc_buffer(function_instance, os_data, os_buffer);
        }

        // catch a simple error, just increment missed error and general error
        CATCH(error) {
                //TRACE_MSG4(NTT,"CATCH(error) urb: %p status: %d len: %d function: %p",
                //                urb, urb->status, urb->actual_length, function_instance);
                // catch a CRC error
                CATCH(crc_error) {
                        crc_bad = 1;
                }
        }
        return 0;
        //return usbd_start_out_urb (urb);
}
/*!
 * mxc_init() - initial tcd setup
 * Allocate interrupts and setup hardware.
 *
 * Resets and clears the OTG core, configures software-HNP function
 * mode, selects the transceiver, and programs the interrupt-enable
 * registers (core/host enabled, host-ETD and function sources masked).
 * Runs entirely with local interrupts disabled.
 *
 * @param otg - otg instance
 */
void mxc_init (struct otg_instance *otg)
{
        int timeout;                    /* NOTE(review): unused */
        unsigned long flags;
        //u32 mode = XCVR_D_SE0;
        u32 mode = XCVR_SE0_D_NEW;      /* NOTE(review): unused — the mode actually
                                         * applied comes from mxc_transceiver_mode below */

        TRACE_MSG0(otg->ocd->TAG, "FS_INIT");
        local_irq_save (flags);
        fs_wl(OTG_SYS_CTRL, 0x0);

        /* 2. Ensure hardware is reset and cleared */
        // XXX
        //fs_clear_words((volatile u32 *)IO_ADDRESS(OTG_DMA_BASE), (32*16/4));
        fs_clear_words((volatile u32 *)IO_ADDRESS(OTG_DMA_BASE), (16*16/4));
        fs_clear_words((void *)IO_ADDRESS(OTG_FUNC_BASE), 0x200);
        mxc_main_clock_off();
        //fs_wl_set(OCD, OTG_CORE_RST_CTRL, MODULE_RSTI2C | 0x3f);
        fs_wl(OTG_CORE_RST_CTRL, MODULE_RSTI2C | 0x3f);
        /* Busy-wait until the reset bits self-clear. */
        while (fs_rl(OTG_CORE_RST_CTRL));

        /* 3. OTG Hardware Mode and clocks
         * set to diff, diff and Configure the OTG to behave as function
         */
        TRACE_MSG0(otg->ocd->TAG, "3. OTG Software Mode and clock");
        fs_orl(OTG_CORE_HWMODE, MODULE_CRECFG_SHNP);            // set to software hnp
        mxc_set_transceiver_mode(mxc_transceiver_mode);
        TRACE_MSG2(otg->ocd->TAG, "FS_INIT: set hwmode: %08x want %08x",
                        fs_rl(OTG_CORE_HWMODE), MODULE_CRECFG_SHNP);

        fs_andl(OTG_CORE_HNP_CSTAT, ~0x00000800);
        fs_rl(OTG_CORE_HNP_CSTAT);              /* read-back to post the write */
        //fs_wl_set(OCD, OTG_CORE_HNP_T3PCR, 0x00000000);
        fs_wl(OTG_CORE_HNP_T3PCR, 0x00000000);
        fs_rl(OTG_CORE_HNP_T3PCR);

        TRACE_MSG0(otg->ocd->TAG, "6. Enable ");
        TRACE_MSG0(otg->ocd->TAG, "enable core interrupts");
        fs_wl(OTG_CORE_CINT_STEN, 0);
        fs_wl(OTG_CORE_CINT_STEN, MODULE_ASHNPINT_EN | MODULE_ASHCINT_EN |
                        MODULE_HNPINT_EN | MODULE_FCINT | MODULE_HCINT);

        TRACE_MSG0(otg->ocd->TAG, "enable host interrupts");
        fs_wl(OTG_CORE_HINT_STEN, HNP_I2COTGINT_EN | HNP_AWAITBTO_EN | HNP_AIDLEBDTO_EN |
                        HNP_SRPSUCFAIL_EN | HNP_SRPINT_EN | HNP_VBUSERROR_EN | HNP_ABSEVAILD_EN |
                        HNP_ABUSVALID_EN | HNP_MASSLVCHG_EN | HNP_IDCHANGE_EN);

        TRACE_MSG0(otg->ocd->TAG, "disable various host interrupts");
        fs_wl(OTG_HOST_XYINT_STEN, 0);
        fs_wl(OTG_HOST_ETD_EN, 0);
        fs_wl(OTG_HOST_ETD_DONE, 0);
        fs_wl(OTG_HOST_SINT_STEN, 0);

        TRACE_MSG0(otg->ocd->TAG, "disable various function interrupts");
        fs_wl(OTG_FUNC_XYINT_STEN, 0);
        fs_wl(OTG_FUNC_EP_EN, 0);
        fs_wl(OTG_FUNC_EP_DEN, 0);
        fs_wl(OTG_DMA_DINT_STEN, 0x3);

        //fs_wb(I2C_MASTER_INT_REG_ADD, 0xf0);
        //fs_wb(I2C_MASTER_INT_REG_ADD, 0x00);

        // XXX note that newer designs than the mx21 will also need to check
        // and/or set OTG_CORE_INTERRUPT_STEN

        TRACE_MSG0(otg->ocd->TAG, "--");
        TRACE_MSG0(otg->ocd->TAG, "8. Ready ");
        TRACE_MSG1(otg->ocd->TAG, "CINT_STEN: %08x", fs_rl(OTG_CORE_CINT_STEN));
        local_irq_restore (flags);
}
/*! arc_add_buffer_to_dtd
 *
 * C.f. 39.16.5.3 - case 1: Link list is empty
 *
 * Build a single dTD describing (len) bytes at (offset) into the active
 * urb's buffer, attach it to the endpoint's queue head and prime the
 * endpoint.  Cache maintenance is performed on the data buffer and on
 * the dTD/dQH structures before the controller is allowed to see them.
 */
static void arc_add_buffer_to_dtd (struct pcd_instance *pcd, struct usbd_endpoint_instance *endpoint,
                int dir, int len, int offset)
{
        struct otg_instance *otg = pcd->otg;
        struct usbd_urb *urb = endpoint->active_urb;
        struct arc_private_struct *privdata = endpoint->privdata;
        u8 hs = pcd->bus->high_speed;
        u8 physicalEndpoint = endpoint->physicalEndpoint[hs];
        u8 bEndpointAddress = endpoint->bEndpointAddress[hs];
        u8 epnum = bEndpointAddress & 0x3f;
        u16 wMaxPacketSize = endpoint->wMaxPacketSize[hs];
        /* Queue head / dTD pair for this (endpoint, direction) slot. */
        struct ep_queue_head *dQH = &udc_controller->ep_qh[2 * epnum + dir];
        struct ep_td_struct *dtd = &(udc_controller->ep_dtd[2 * epnum + dir]);
        u32 mask = 0;
        int timeout1 = 0;
        int timeout2 = 0;
        u32 endptstat = -1;
        u32 endptprime = -1;
        u32 endptcomplete = -1;

        TRACE_MSG6(pcd->TAG, "[%2d] USBCMD: %08x ENDPTPRIME: %08x COMPLETE: %08x STATUS: %08x %s",
                        2*epnum+dir, UOG_USBCMD, UOG_ENDPTPRIME, UOG_ENDPTCOMPLETE,
                        (u32)dQH->size_ioc_int_sts, (dir == ARC_DIR_OUT) ? "OUT" : "IN");

        if (urb && urb->buffer) {
                TRACE_MSG4(pcd->TAG, "buffer: %x length: %d alloc: %d dir: %d ",
                                urb->buffer, urb->actual_length, urb->buffer_length, dir);
                /* flush cache for IN */
                if ((dir == ARC_DIR_IN) && urb->actual_length)
                        dma_cache_maint(urb->buffer, urb->actual_length, DMA_TO_DEVICE);
                /* invalidate cache for OUT */
                else if ((dir == ARC_DIR_OUT) && urb->buffer_length)
                        dma_cache_maint(urb->buffer, urb->alloc_length, DMA_FROM_DEVICE);
        }

        /* Set size and interrupt on each dtd, Clear reserved field,
         * set pointers and flush from cache, and save in cur_dqh for dtd_releases() */
        memset(dtd, 0, sizeof(struct ep_td_struct));
        dtd->size_ioc_sts = cpu_to_le32(((len << DTD_LENGTH_BIT_POS) | DTD_IOC | DTD_STATUS_ACTIVE));
        dtd->size_ioc_sts &= cpu_to_le32(~DTD_RESERVED_FIELDS);
        dtd->buff_ptr0 = cpu_to_le32(endpoint->active_urb ?
                        (u32) (virt_to_phys (endpoint->active_urb->buffer + offset)) : 0);
        dtd->next_td_ptr = cpu_to_le32(DTD_NEXT_TERMINATE);
        dtd->next_td_virt = NULL;
        dma_cache_maint(dtd, sizeof(struct ep_td_struct), DMA_TO_DEVICE);
        privdata->cur_dqh = dQH;

        /* Case 1 - Step 1 - Write dQH next pointer and dQH terminate bit to 0 as single DWord */
        dQH->next_dtd_ptr = cpu_to_le32( virt_to_phys((void *)dtd) & EP_QUEUE_HEAD_NEXT_POINTER_MASK);

        /* Case 1 - Step 2 - Clear active and halt bit */
        /* NOTE(review): le32_to_cpu is applied to the mask here where the
         * neighbouring code uses cpu_to_le32 — identical on little-endian
         * builds, but worth confirming for big-endian. */
        dQH->size_ioc_int_sts &= le32_to_cpu(~(EP_QUEUE_HEAD_STATUS_ACTIVE | EP_QUEUE_HEAD_STATUS_HALT));
        dma_cache_maint(dQH, sizeof(struct ep_queue_head), DMA_TO_DEVICE);

        /* Case 1 - Step 3 - Prime endpoint by writing ENDPTPRIME */
        mask = (dir == ARC_DIR_OUT) ? (1 << epnum) : (1 << (epnum + 16));

        /* Verify that endpoint PRIME is not set, wait if necessary. */
        for (timeout1 = 0; (UOG_ENDPTPRIME & mask) && (timeout1 ++ < 100); udelay(1));

        /* ep0 needs extra tests */
        UNLESS(epnum) {
                /* C.f. 39.16.3.2.2 Data Phase */
                UOG_ENDPTPRIME |= mask;
                /* Poll until either STAT reports the prime took effect or
                 * the PRIME bit clears without it (failure). */
                for (timeout2 = 0; timeout2++ < 100; ) {
                        endptprime = UOG_ENDPTPRIME;            // order may be important
                        endptstat = UOG_ENDPTSTAT;              // we check stat after prime
                        BREAK_IF(endptstat & mask);
                        BREAK_UNLESS(endptprime & mask);
                }
                if (!(endptstat & mask) && !(endptprime & mask)) {
                        TRACE_MSG2(pcd->TAG, "[%2d] ENDPTSETUPSTAT: %04x PREMATURE FAILUURE",
                                        2*epnum+dir, UOG_ENDPTSETUPSTAT);
                }
                TRACE_MSG6(pcd->TAG, "[%2d] ENDPTPRIME %08x ENPTSTAT: %08x mask: %08x timeout: %d:%d SET",
                                2*epnum+dir, UOG_ENDPTPRIME, UOG_ENDPTSTAT, mask, timeout1, timeout2);;
        }
        /* epn general case */
        else {
/*!
 * otg_pci_probe() - otg pci probe function
 *
 * Get the standard PCI resources allocated.
 *
 * Enables the device, requests and maps the memory regions selected by
 * otg_pci_driver->pci_regions, installs the shared interrupt handler,
 * clamps the PCI latency timer, and links the new otg_dev onto the
 * global otg_devs list.  On any failure all partial setup is undone.
 *
 * BUG FIX: the error path passed otg_dev as the free_irq() dev_id, but
 * request_irq() registered with pci_dev — a shared-irq dev_id mismatch
 * means the handler would never actually be removed.  Also removed the
 * unused local otg_driver.
 *
 * @param pci_dev the probed PCI device
 * @param id matching entry from the driver's id table (unused)
 * @param otg_pci_driver driver description (regions, isr hooks, name)
 * @return 0 on success, -EINVAL on failure
 */
int __devinit otg_pci_probe (struct pci_dev *pci_dev, const struct pci_device_id *id,
                struct otg_pci_driver *otg_pci_driver)
{
        struct otg_dev *otg_dev = NULL;
        int enabled = 0;
        int irq = 0;
        int region;
        u8 latency, limit;

        /* allocate otg_dev structure and fill in standard fields */
        THROW_UNLESS((otg_dev = kmalloc(sizeof(struct otg_dev), SLAB_KERNEL)), error);
        memset(otg_dev, 0, sizeof(struct otg_dev));
        otg_dev->PCI = otg_trace_obtain_tag();

        THROW_UNLESS((enabled = !pci_enable_device(pci_dev)), error);

        otg_dev->otg_pci_driver = otg_pci_driver;
        otg_dev->pci_regions = otg_pci_driver->pci_regions;
        pci_set_drvdata(pci_dev, otg_dev);
        printk(KERN_INFO"%s: pci_dev: %x otg_dev: %x drv_data: %x\n", __FUNCTION__,
                        pci_dev, otg_dev, pci_get_drvdata(pci_dev));

        /* Request and map each memory region selected by pci_regions. */
        for (region = 0; region < DEVICE_COUNT_RESOURCE; region++) {
                unsigned long resource_start;
                unsigned long resource_len;
                TRACE_MSG5(otg_dev->PCI, "[%2d] flags: %08x start: %08x end: %08x len: %08x",
                                region,
                                pci_resource_flags(pci_dev, region),
                                pci_resource_start(pci_dev, region),
                                pci_resource_end(pci_dev, region),
                                pci_resource_len(pci_dev, region)
                          );
                CONTINUE_UNLESS(otg_dev->pci_regions & (1 << region));
                resource_start = pci_resource_start(pci_dev, region);
                resource_len = pci_resource_len(pci_dev, region);
                TRACE_MSG5(otg_dev->PCI, "pci_dev: %x otg_dev: %x start: %lx len: %lx name: %s",
                                pci_dev, otg_dev, resource_start, resource_len, otg_pci_driver->name);
                THROW_UNLESS(request_mem_region(resource_start, resource_len,
                                        otg_pci_driver->name), error);
                THROW_UNLESS((otg_dev->regs[region] = ioremap_nocache(resource_start,
                                        resource_len)), error);
                TRACE_MSG2(otg_dev->PCI, "regs[%d] %x", region, otg_dev->regs[region]);
        }

        /* Shared interrupt: dev_id is pci_dev (see otg_pci_isr()). */
        THROW_UNLESS((irq = !request_irq(pci_dev->irq, otg_pci_isr, SA_SHIRQ,
                                        otg_pci_driver->name, pci_dev)), error);
        TRACE_MSG1(otg_dev->PCI, "irq: %d", pci_dev->irq);

        /* bad pci latencies can contribute to overruns - but where ?? */
        pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &latency);
        pci_read_config_byte (pci_dev, PCI_MAX_LAT, &limit);
        TRACE_MSG2(otg_dev->PCI, "latency: %02x limit: %02x", latency, limit);
        if (latency && /* limit &&*/ (limit < latency)) {
                pci_write_config_byte (pci_dev, PCI_LATENCY_TIMER, limit);
                pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &latency);
                TRACE_MSG2(otg_dev->PCI, "latency: %02x limit: %02x", latency, limit);
        }

        /* XXX lock? */
        otg_dev->id = otg_get_id(pci_dev);
        TRACE_MSG1(otg_dev->PCI, "id: %d", otg_dev->id);

        /* Push onto the head of the global device list. */
        if (otg_devs) {
                TRACE_MSG2(otg_dev->PCI, "otg_devs: %x new: %x", otg_devs, otg_dev);
                otg_dev->next = otg_devs;
        }
        otg_devs = otg_dev;
        return 0;

        CATCH(error) {
                printk(KERN_INFO"%s: FAILED\n", __FUNCTION__);
                pci_set_drvdata(pci_dev, NULL);
                /* FIX: dev_id must match the one passed to request_irq()
                 * above (pci_dev, not otg_dev). */
                if (irq)
                        free_irq(pci_dev->irq, pci_dev);
                otg_pci_free_dev(pci_dev, otg_dev);
                if (otg_dev)
                        kfree(otg_dev);
                if (enabled)
                        pci_disable_device(pci_dev);
                return -EINVAL;
        }
}