static int spi_imx_transfer(struct spi_device *spi, struct spi_transfer *transfer) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); u64 timeout = 100; spi_imx->tx_buf = transfer->tx_buf; spi_imx->rx_buf = transfer->rx_buf; spi_imx->count = transfer->len; spi_imx->txfifo = 0; init_completion(&spi_imx->xfer_done); spi_imx_push(spi_imx); spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE); vmm_completion_wait_timeout(&spi_imx->xfer_done, &timeout); if (!timeout) { spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE); vmm_completion_wait(&spi_imx->xfer_done); } return transfer->len; }
/*
 * Issue a USB control message on @pipe and wait for it to finish.
 *
 * @timeout < 1 means wait forever; otherwise @timeout is multiplied
 * by 1000000 before being handed to the completion wait (i.e. it is
 * interpreted in milliseconds if the wait takes nanoseconds).
 *
 * Returns VMM_OK on success, a wait/submit error code, or the
 * negative URB status if the transfer itself failed.
 */
int usb_control_msg(struct usb_device *dev, u32 pipe,
		    u8 request, u8 requesttype,
		    u16 value, u16 index,
		    void *data, u16 size, int timeout)
{
	struct usb_devrequest setup;
	struct vmm_completion done;
	struct urb urb;
	u64 nsecs;
	int rc;

	/* Build the SETUP packet; multi-byte fields go out
	 * little-endian on the wire */
	setup.requesttype = requesttype;
	setup.request = request;
	setup.value = vmm_cpu_to_le16(value);
	setup.index = vmm_cpu_to_le16(index);
	setup.length = vmm_cpu_to_le16(size);

	DPRINTF("%s: request: 0x%X, requesttype: 0x%X, " \
		"value 0x%X index 0x%X length 0x%X\n",
		__func__, request, requesttype, value, index, size);

	/* Prepare URB and its completion, then hand it to the HCD */
	usb_init_urb(&urb);
	INIT_COMPLETION(&done);
	usb_fill_control_urb(&urb, dev, pipe, (unsigned char *)&setup,
			     data, size, urb_request_complete, &done);
	rc = usb_hcd_submit_urb(&urb);
	if (rc) {
		return rc;
	}

	/* Wait for urb_request_complete() to signal us */
	if (timeout < 1) {
		vmm_completion_wait(&done);
		rc = VMM_OK;
	} else {
		nsecs = timeout * 1000000ULL;
		rc = vmm_completion_wait_timeout(&done, &nsecs);
	}
	if (rc) {
		/* NOTE(review): on timeout the stack-allocated URB may
		 * still be queued in the host controller while this
		 * frame goes away — confirm whether an unlink/cancel
		 * step is required here. */
		return rc;
	}

	/* Transfer completed; propagate a failed URB status */
	if (urb.status < 0) {
		return urb.status;
	}

	return VMM_OK;
}
/*
 * Issue a USB bulk transfer on @pipe and wait for it to finish.
 *
 * @timeout < 1 means wait forever; otherwise @timeout is multiplied
 * by 1000000 before being handed to the completion wait (i.e. it is
 * interpreted in milliseconds if the wait takes nanoseconds).
 * On success, *@actual_length (if non-NULL) receives the number of
 * bytes actually transferred.
 *
 * Returns VMM_OK on success, a wait/submit error code, or the
 * negative URB status if the transfer itself failed.
 */
int usb_bulk_msg(struct usb_device *dev, u32 pipe,
		 void *data, int len,
		 int *actual_length, int timeout)
{
	struct vmm_completion done;
	struct urb urb;
	u64 nsecs;
	int rc;

	/* Prepare URB and its completion, then hand it to the HCD */
	usb_init_urb(&urb);
	INIT_COMPLETION(&done);
	usb_fill_bulk_urb(&urb, dev, pipe, data, len,
			  urb_request_complete, &done);
	rc = usb_hcd_submit_urb(&urb);
	if (rc) {
		return rc;
	}

	/* Wait for urb_request_complete() to signal us */
	if (timeout < 1) {
		vmm_completion_wait(&done);
		rc = VMM_OK;
	} else {
		nsecs = timeout * 1000000ULL;
		rc = vmm_completion_wait_timeout(&done, &nsecs);
	}
	if (rc) {
		/* NOTE(review): on timeout the stack-allocated URB may
		 * still be queued in the host controller while this
		 * frame goes away — confirm whether an unlink/cancel
		 * step is required here. */
		return rc;
	}

	/* Transfer completed; propagate a failed URB status */
	if (urb.status < 0) {
		return urb.status;
	}

	/* Report how many bytes actually moved */
	if (actual_length) {
		*actual_length = urb.actual_length;
	}

	return VMM_OK;
}
/**
 * uIP doesn't provide a mechanism to create a raw-IP packet, so we
 * trigger the sending of ECHO_REQUEST by sending ourself an
 * ECHO_REPLY message with an all-zeroes destination IP address.
 *
 * A global completion variable (uip_ping_done) is used to notify the
 * reception of the actual ECHO_REPLY; uip_ping_reply points at the
 * caller's buffer for the duration of the exchange.
 *
 * Returns VMM_OK on success, VMM_EFAIL if no reply arrived in time.
 */
int vmm_netstack_send_icmp_echo(u8 *ripaddr, u16 size, u16 seqno,
				struct vmm_icmp_echo_reply *reply)
{
	struct uip_icmp_echo_request *echo_req;
	struct vmm_mbuf *mbuf;
	u16 all_zeroes_addr[] = {0, 0};
	u64 tleft = (u64)20000000000;
	u16 ethsize;
	u8 *pkt;

	/* Allocate an mbuf big enough for link + IP + ICMP headers */
	MGETHDR(mbuf, 0, 0);
	ethsize = UIP_ICMP_LLH_LEN + UIP_ICMP_ECHO_DLEN;
	MEXTMALLOC(mbuf, ethsize, 0);
	mbuf->m_len = mbuf->m_pktlen = ethsize;

	/* Skip the src & dst mac addresses as they will be filled by
	 * uip_netport_loopback_send */
	pkt = mtod(mbuf, u8 *) + 12;

	/* IPv4 ethertype */
	*pkt++ = 0x08;
	*pkt++ = 0x00;

	/* Fill the echo_request structure embedded in the ICMP payload */
	echo_req = (struct uip_icmp_echo_request *)(pkt + UIP_ICMP_IPH_LEN);
	uip_ipaddr_copy(echo_req->ripaddr, ripaddr);
	echo_req->len = size;
	echo_req->seqno = seqno;

	/* IP header with all-zeroes destination (the self-send marker) */
	uip_create_ip_pkt(pkt, all_zeroes_addr, (ethsize - UIP_LLH_LEN));

	/* ICMP header goes in last because icmpchksum is computed over
	 * the entire ICMP message */
	uip_create_icmp_pkt(pkt, ICMP_ECHO_REPLY,
			    (ethsize - UIP_LLH_LEN - UIP_IPH_LEN), 0);

	/* Publish the reply buffer, then trigger the echo via loopback */
	uip_ping_reply = reply;
	uip_netport_loopback_send(mbuf);

	/* Wait for the reply until timeout (~20s) */
	vmm_completion_wait_timeout(&uip_ping_done, &tleft);

	/* The callback copies the reply data before completing, so it
	 * is safe to clear the pointer and prevent unwanted callbacks */
	uip_ping_reply = NULL;

	/* Zero remaining time means the wait timed out */
	return (tleft == (u64)0) ? VMM_EFAIL : VMM_OK;
}
/** * Fills the uip_buf with packet from RX queue. In case RX queue is * empty, we wait for sometime. */ int uip_netport_read(void) { struct vmm_mbuf *mbuf; struct dlist *node; unsigned long flags; u64 timeout = 50000000; struct uip_port_state *s = &uip_port_state; /* Keep trying till RX buf is not empty */ vmm_spin_lock_irqsave(&s->lock, flags); while(list_empty(&s->rxbuf)) { vmm_spin_unlock_irqrestore(&s->lock, flags); if(timeout) { /* Still time left for timeout so we wait */ vmm_completion_wait_timeout(&s->rx_possible, &timeout); } else { /* We timed-out and buffer is still empty, so return */ uip_len = 0; return uip_len; } vmm_spin_lock_irqsave(&s->lock, flags); } /* At this point we are sure rxbuf is non-empty, so we just * dequeue a packet */ node = list_pop(&s->rxbuf); mbuf = m_list_entry(node); vmm_spin_unlock_irqrestore(&s->lock, flags); if(mbuf == NULL) { vmm_panic("%s: mbuf is null\n", __func__); } if(!uip_buf) { vmm_panic("%s: uip_buf is null\n", __func__); } /* Copy the data from mbuf to uip_buf */ uip_len = min(UIP_BUFSIZE, mbuf->m_pktlen); m_copydata(mbuf, 0, uip_len, uip_buf); /* Free the mbuf */ m_freem(mbuf); return uip_len; }
/**
 * Prefetch the ARP mapping for @ipaddr by sending ourself a
 * broadcast ARP message with ARP_HINT as opcode, then blocking
 * (up to ~5s) until the prefetch completes.
 */
void vmm_netstack_prefetch_arp_mapping(u8 *ipaddr)
{
	struct vmm_mbuf *mbuf;
	u64 tleft = (u64)5000000000;
	int size;

	/* Our own address never needs prefetching */
	if (!memcmp(ipaddr, uip_hostaddr, 4)) {
		return;
	}

	/* Allocate an mbuf sized for a single ARP frame */
	MGETHDR(mbuf, 0, 0);
	size = sizeof(struct arp_hdr);
	MEXTMALLOC(mbuf, size, 0);
	mbuf->m_len = mbuf->m_pktlen = size;

	/* Build the broadcast ARP HINT packet in place */
	uip_create_broadcast_eth_arp_pkt(mtod(mbuf, u8 *), ipaddr, ARP_HINT);

	/* Send the mbuf to self to trigger the ARP prefetch */
	uip_netport_loopback_send(mbuf);

	/* Block until the prefetch is done or the wait times out */
	vmm_completion_wait_timeout(&uip_arp_prefetch_done, &tleft);
}
/**
 * Send an ICMP ECHO request of @size payload bytes to @ripaddr and
 * wait up to PING_DELAY_NS for the reply.
 *
 * On success the reply callback fills *@reply and records the
 * reception timestamp in lns.ping_recv_tstamp; the RTT (in
 * microseconds) is stored in reply->rtt.  If no reply arrives, the
 * pre-set deadline makes the reported RTT equal the full timeout
 * window and the wait's error code is returned.
 *
 * Fixes vs. previous version:
 *  - reply->rtt is only written when @reply is non-NULL (the old
 *    code dereferenced lns.ping_reply unconditionally);
 *  - removed the redundant pre-wait recomputation of the timeout
 *    (it was provably equal to the PING_DELAY_NS initializer).
 */
int netstack_send_echo(u8 *ripaddr, u16 size, u16 seqno,
		       struct netstack_echo_reply *reply)
{
	int i, rc;
	u64 timeout = PING_DELAY_NS;
	struct pbuf *p;
	struct icmp_echo_hdr *iecho;
	size_t len = sizeof(struct icmp_echo_hdr) + size;

	LWIP_ASSERT("ping_size <= 0xffff", len <= 0xffff);

	/* Lock ping context for atomicity */
	vmm_mutex_lock(&lns.ping_lock);

	/* Alloc ping pbuf; it must be a single contiguous buffer */
	p = pbuf_alloc(PBUF_IP, (u16_t)len, PBUF_RAM);
	if (!p) {
		vmm_mutex_unlock(&lns.ping_lock);
		return VMM_ENOMEM;
	}
	if ((p->len != p->tot_len) || (p->next != NULL)) {
		pbuf_free(p);
		vmm_mutex_unlock(&lns.ping_lock);
		return VMM_EFAIL;
	}

	/* Prepare ECHO request with a 0,1,2,... pattern payload */
	iecho = (struct icmp_echo_hdr *)p->payload;
	ICMPH_TYPE_SET(iecho, ICMP_ECHO);
	ICMPH_CODE_SET(iecho, 0);
	iecho->chksum = 0;
	iecho->id = PING_ID;
	iecho->seqno = htons(seqno);
	for (i = 0; i < size; i++) {
		((char *)iecho)[sizeof(struct icmp_echo_hdr) + i] = (char)i;
	}
	iecho->chksum = inet_chksum(iecho, len);

	/* Prepare target address */
	IP4_ADDR(&lns.ping_addr,
		 ripaddr[0], ripaddr[1], ripaddr[2], ripaddr[3]);

	/* Save ping info.  ping_recv_tstamp is pre-set to the deadline
	 * so that on timeout the reported RTT equals the full window;
	 * the reply callback overwrites it with the real timestamp. */
	lns.ping_seq_num = seqno;
	lns.ping_reply = reply;
	lns.ping_recv_tstamp = 0;
	lns.ping_send_tstamp = vmm_timer_timestamp();
	lns.ping_recv_tstamp = lns.ping_send_tstamp + PING_DELAY_NS;

	/* Send ping packet */
	raw_sendto(lns.ping_pcb, p, &lns.ping_addr);

	/* Wait for ping to complete with timeout */
	rc = vmm_completion_wait_timeout(&lns.ping_done, &timeout);

	/* Report the RTT in microseconds — only if the caller actually
	 * provided a reply buffer (guard against NULL dereference) */
	if (reply) {
		timeout = lns.ping_recv_tstamp - lns.ping_send_tstamp;
		reply->rtt = udiv64(timeout, 1000);
	}

	/* Free ping pbuf */
	pbuf_free(p);

	/* Clear ping reply pointer */
	lns.ping_reply = NULL;

	/* Unlock ping context */
	vmm_mutex_unlock(&lns.ping_lock);

	return rc;
}