int ecm_fd_function_enable (struct usbd_function_instance *function_instance) { struct usb_network_private *npd; struct usbd_class_ethernet_networking_descriptor *ethernet = &ecm_class_2; char address_str[14]; /* do normal network function enable first - this creates network private data */ RETURN_EINVAL_IF(net_fd_function_enable(function_instance, network_ecm, net_fd_recv_urb_ecm, net_fd_start_xmit_ecm, net_fd_start_recv_ecm, 0 )); npd = function_instance->privdata; /* if successful then lets allocate iMACAddress string */ TRACE_MSG6(NTT, "local: %02x:%02x:%02x:%02x:%02x:%02x", npd->local_dev_addr[0], npd->local_dev_addr[1], npd->local_dev_addr[2], npd->local_dev_addr[3], npd->local_dev_addr[4], npd->local_dev_addr[5]); TRACE_MSG6(NTT, "remote: %02x:%02x:%02x:%02x:%02x:%02x", npd->remote_dev_addr[0], npd->remote_dev_addr[1], npd->remote_dev_addr[2], npd->remote_dev_addr[3], npd->remote_dev_addr[4], npd->remote_dev_addr[5]); snprintf(address_str, 13, "%02x%02x%02x%02x%02x%02x", npd->remote_dev_addr[0], npd->remote_dev_addr[1], npd->remote_dev_addr[2], npd->remote_dev_addr[3], npd->remote_dev_addr[4], npd->remote_dev_addr[5]); ethernet->iMACAddress = usbd_alloc_string(function_instance, address_str); return 0; }
/*! pmic_otg_event_bh - pmic otg event handler (bottom half)
 *
 * Reads the PMIC sense bits, translates each hardware sense line into the
 * corresponding OTG state-machine input flag (asserting either the flag or
 * its trailing-underscore "negated" counterpart), and forwards the combined
 * mask to the OTG state machine via otg_event().
 *
 * @param data - otg instance
 */
void pmic_otg_event_bh(void *data)
{
        struct otg_instance *otg = (struct otg_instance *) data;
        otg_current_t inputs;
        t_sensor_bits sense_bits;
        /* NOTE(review): force starts TRUE and is never cleared anywhere in
         * this function, so the (inputs ^ inputs_saved) change filter below
         * is effectively bypassed and every event is forwarded — confirm
         * whether force was meant to be reset to FALSE after the first pass. */
        static BOOL force = TRUE;
        static otg_current_t inputs_saved = 0;

        /* Bail out if the PMIC sense register read fails; nothing sensible
         * can be reported to the state machine without it. */
        if (pmic_get_sensors(&sense_bits)) {
                printk(KERN_INFO "%s: pmic_get_sensors() failed\n", __FUNCTION__);
                return;
        }

        /* Trace each sense line: ' ' means asserted, '/' means de-asserted. */
        TRACE_MSG6(REMOVE_TCD, "usb4v4s%c usb2v0s%c usb0v8s:%c id_gnds%c id_floats%c id_se1s%c",
                        sense_bits.sense_usb4v4s ? ' ' : '/',
                        sense_bits.sense_usb2v0s ? ' ' : '/',
                        sense_bits.sense_usb0v8s ? ' ' : '/',
                        sense_bits.sense_id_gnds ? ' ' : '/',
                        sense_bits.sense_id_floats ? ' ' : '/',
                        sense_bits.sense_se1s ? ' ' : '/');

        /* Map sense bits to OTG inputs.  Each ternary selects the asserted
         * flag or its negated (trailing underscore) form so every input is
         * always reported in one state or the other.  Note usb0v8s maps
         * inverted: the 0.8 V sense high means the B session has NOT ended. */
        inputs = (sense_bits.sense_usb4v4s ? VBUS_VLD : VBUS_VLD_) |
                (sense_bits.sense_usb2v0s ? (B_SESS_VLD | A_SESS_VLD) : (B_SESS_VLD_ | A_SESS_VLD_)) |
                (sense_bits.sense_usb0v8s ? B_SESS_END_ : B_SESS_END) |
                (sense_bits.sense_id_gnds ? ID_GND : ID_GND_) |
                (sense_bits.sense_id_floats ? ID_FLOAT : ID_FLOAT_) |
                (sense_bits.sense_se1s ? SE1_DET : SE1_DET_) |
                (det_dp_hi ? DP_HIGH : DP_HIGH_) |
                (det_dm_hi ? DM_HIGH : DM_HIGH_);

        // printk(KERN_INFO" inputs: %8X\n", inputs);

        /* NOTE(review): the first trace argument is only sense_se1s although
         * the format label says "sense_bits" — verify the intended value. */
        TRACE_MSG4(REMOVE_TCD, "MC13783 EVENT: sense_bits: %8x otg inputs: %8x saved: %x diff: %x",
                        sense_bits.sense_se1s, inputs, inputs_saved, inputs ^ inputs_saved);

        /* Skip forwarding when nothing changed (unless force is set — see
         * the note on force above). */
        RETURN_UNLESS(force || (inputs ^ inputs_saved));
        inputs_saved = inputs;
        otg_event(REMOVE_tcd_instance->otg, inputs, REMOVE_TCD, "PMIC OTG EVENT");

        // gpio_config_int_en(2, 17, TRUE);
        // gpio_config_int_en(2, 16, TRUE);
        // gpio_clear_int (2, 17);
        // gpio_clear_int (2, 16);
}
/*!
 * mxc_gptcr_start_timer() - start a timer for otg state machine
 * Set or reset timer to interrupt in number of uS (micro-seconds).
 *
 * A zero usec cancels the timer: the compare channel is disabled and the
 * output-compare interrupt is left unarmed.
 *
 * XXX There may be a floor or minimum that can be effectively set.
 * XXX We have seen an occasional problem with US(25) for discharge for example.
 *
 * @param otg
 * @param usec
 * @return always 0
 */
int mxc_gptcr_start_timer(struct otg_instance *otg, int usec)
{
        u32 ticks;
        u32 match;
        unsigned long flags;

        /* The GPT register sequence below must not be interleaved with the
         * timer ISR; protect it with a local interrupt disable. */
        local_irq_save (flags);

        //TRACE_MSG2(OCD, "usec: %d CNT: %08x", usec, *_reg_GPT_GPTCNT);

        /* Clamp short (but non-zero) requests to a 100 us floor — see the
         * XXX notes above about very short timeouts misbehaving. */
        if (usec && (usec < 100)) {
                usec = 100;
                TRACE_MSG2(OCD, "usec: %d CNT: %08x", usec, *_reg_GPT_GPTCNT);
        }

        /*
         * Disable Channel 3 compare.
         */
        *_reg_GPT_GPTCR &= ~(0x7 << 26);

        mxc_gptcr_usec_set = usec;

        if (usec) {
                /* Convert microseconds to GPT ticks before computing the match. */
                mxc_gptcr_ticks_set = ticks = (u32 ) mxc_gptcr_ticks(usec);
                /* Mark inactive while the match value is being updated so the
                 * ISR does not act on a stale match. */
                mxc_gptcr_match_set = 0;
                mxc_gptcr_active = 0;

                /*
                 * Compute and set match register
                 * (unsigned addition: wrap-around of the free-running counter
                 * is handled by modular arithmetic).
                 */
                mxc_gptcr_match_set = match = *_reg_GPT_GPTCNT + ticks;
                *_reg_GPT_GPTOCR3 = match;
                mxc_gptcr_active = 1;

                TRACE_MSG6(OCD, "cnt: %08x match: %08x GPTCNT: %08x GPTCR: %08x GPTSR: %08x GPTOCR3: %08x\n",
                                mxc_gptcr_ticks_set, mxc_gptcr_match_set,
                                *_reg_GPT_GPTCNT, *_reg_GPT_GPTCR, *_reg_GPT_GPTSR, *_reg_GPT_GPTOCR3);

                /*
                 * Enable interrupt (output compare channel 3).
                 */
                *_reg_GPT_GPTIR |= (0x01 << 2);
        }

        local_irq_restore (flags);
        return 0;
}
/*! arc_add_buffer_to_dtd * * C.f. 39.16.5.3 - case 1: Link list is empty */ static void arc_add_buffer_to_dtd (struct pcd_instance *pcd, struct usbd_endpoint_instance *endpoint, int dir, int len, int offset) { struct otg_instance *otg = pcd->otg; struct usbd_urb *urb = endpoint->active_urb; struct arc_private_struct *privdata = endpoint->privdata; u8 hs = pcd->bus->high_speed; u8 physicalEndpoint = endpoint->physicalEndpoint[hs]; u8 bEndpointAddress = endpoint->bEndpointAddress[hs]; u8 epnum = bEndpointAddress & 0x3f; u16 wMaxPacketSize = endpoint->wMaxPacketSize[hs]; struct ep_queue_head *dQH = &udc_controller->ep_qh[2 * epnum + dir]; struct ep_td_struct *dtd = &(udc_controller->ep_dtd[2 * epnum + dir]); u32 mask = 0; int timeout1 = 0; int timeout2 = 0; u32 endptstat = -1; u32 endptprime = -1; u32 endptcomplete = -1; TRACE_MSG6(pcd->TAG, "[%2d] USBCMD: %08x ENDPTPRIME: %08x COMPLETE: %08x STATUS: %08x %s", 2*epnum+dir, UOG_USBCMD, UOG_ENDPTPRIME, UOG_ENDPTCOMPLETE, (u32)dQH->size_ioc_int_sts, (dir == ARC_DIR_OUT) ? "OUT" : "IN"); if (urb && urb->buffer) { TRACE_MSG4(pcd->TAG, "buffer: %x length: %d alloc: %d dir: %d ", urb->buffer, urb->actual_length, urb->buffer_length, dir); /* flush cache for IN */ if ((dir == ARC_DIR_IN) && urb->actual_length) dma_cache_maint(urb->buffer, urb->actual_length, DMA_TO_DEVICE); /* invalidate cache for OUT */ else if ((dir == ARC_DIR_OUT) && urb->buffer_length) dma_cache_maint(urb->buffer, urb->alloc_length, DMA_FROM_DEVICE); } /* Set size and interrupt on each dtd, Clear reserved field, * set pointers and flush from cache, and save in cur_dqh for dtd_releases() */ memset(dtd, 0, sizeof(struct ep_td_struct)); dtd->size_ioc_sts = cpu_to_le32(((len << DTD_LENGTH_BIT_POS) | DTD_IOC | DTD_STATUS_ACTIVE)); dtd->size_ioc_sts &= cpu_to_le32(~DTD_RESERVED_FIELDS); dtd->buff_ptr0 = cpu_to_le32(endpoint->active_urb ? 
(u32) (virt_to_phys (endpoint->active_urb->buffer + offset)) : 0); dtd->next_td_ptr = cpu_to_le32(DTD_NEXT_TERMINATE); dtd->next_td_virt = NULL; dma_cache_maint(dtd, sizeof(struct ep_td_struct), DMA_TO_DEVICE); privdata->cur_dqh = dQH; /* Case 1 - Step 1 - Write dQH next pointer and dQH terminate bit to 0 as single DWord */ dQH->next_dtd_ptr = cpu_to_le32( virt_to_phys((void *)dtd) & EP_QUEUE_HEAD_NEXT_POINTER_MASK); /* Case 1 - Step 2 - Clear active and halt bit */ dQH->size_ioc_int_sts &= le32_to_cpu(~(EP_QUEUE_HEAD_STATUS_ACTIVE | EP_QUEUE_HEAD_STATUS_HALT)); dma_cache_maint(dQH, sizeof(struct ep_queue_head), DMA_TO_DEVICE); /* Case 1 - Step 3 - Prime endpoint by writing ENDPTPRIME */ mask = (dir == ARC_DIR_OUT) ? (1 << epnum) : (1 << (epnum + 16)); /* Verify that endpoint PRIME is not set, wait if necessary. */ for (timeout1 = 0; (UOG_ENDPTPRIME & mask) && (timeout1 ++ < 100); udelay(1)); /* ep0 needs extra tests */ UNLESS(epnum) { /* C.f. 39.16.3.2.2 Data Phase */ UOG_ENDPTPRIME |= mask; for (timeout2 = 0; timeout2++ < 100; ) { endptprime = UOG_ENDPTPRIME; // order may be important endptstat = UOG_ENDPTSTAT; // we check stat after prime BREAK_IF(endptstat & mask); BREAK_UNLESS(endptprime & mask); } if (!(endptstat & mask) && !(endptprime & mask)) { TRACE_MSG2(pcd->TAG, "[%2d] ENDPTSETUPSTAT: %04x PREMATURE FAILUURE", 2*epnum+dir, UOG_ENDPTSETUPSTAT); } TRACE_MSG6(pcd->TAG, "[%2d] ENDPTPRIME %08x ENPTSTAT: %08x mask: %08x timeout: %d:%d SET", 2*epnum+dir, UOG_ENDPTPRIME, UOG_ENDPTSTAT, mask, timeout1, timeout2);; } /* epn general case */ else {