/*
 * Detach a previously attached netmap interface and release its slot.
 *
 * Looks up `ifname`, frees the ring's name string, closes the netmap
 * descriptor and returns the slot to the pool.  The parallel me[]/fd[]
 * arrays are kept dense by moving the last entry into the vacated slot.
 * When the last interface goes away, the packet and ring pools are torn
 * down as well ("lazy exit").
 *
 * Returns 0 on success, 1 if `ifname` is not currently attached.
 * Takes NETMAP_LOCK() internally; callers must not hold it.
 */
unsigned int
peak_netmap_detach(const char *ifname)
{
	struct my_ring *slot = NULL;
	struct my_ring **me = NULL;
	struct pollfd *fd = NULL;
	unsigned int i;

	NETMAP_LOCK();

	i = peak_netmap_find(ifname);
	if (i >= NETMAP_COUNT()) {
		NETMAP_UNLOCK();
		alert("netmap interface %s not attached\n", ifname);
		return (1);
	}

	me = &self->me[i];
	fd = &self->fd[i];

	slot = *me;
	free(slot->ifname);
	netmap_close(slot);
	/* returns the slot to the pool; NETMAP_COUNT() now excludes it */
	NETMAP_PUT(slot);

	if (i < NETMAP_COUNT()) {
		/* reshuffle list to make it linear */
		*me = self->me[NETMAP_COUNT()];
		self->me[NETMAP_COUNT()] = NULL;
		/*
		 * Rebuild the pollfd list as well: move the (former) last
		 * pollfd into the vacated slot, then clear the stale tail
		 * entry.  BUGFIX: the original call had the bcopy() source
		 * and destination swapped — bcopy(src, dst, len) — which
		 * clobbered the live tail pollfd and left the hole at
		 * index i, desynchronizing fd[] from the compacted me[].
		 */
		bcopy(&self->fd[NETMAP_COUNT()], fd, sizeof(*fd));
		bzero(&self->fd[NETMAP_COUNT()], sizeof(*fd));
	}

	/* lazy exit: tear the pools down once the last interface is gone */
	if (!NETMAP_COUNT()) {
		prealloc_exit(&self->pkt_pool);
		prealloc_exit(&self->me_pool);
	}

	NETMAP_UNLOCK();

	return (0);
}
/* * bridge [-v] if1 [if2] * * If only one name, or the two interfaces are the same, * bridges userland and the adapter. Otherwise bridge * two intefaces. */ int main(int argc, char **argv) { struct pollfd pollfd[2]; int i, ch; u_int burst = 1024, wait_link = 4; struct my_ring me[2]; char *ifa = NULL, *ifb = NULL; fprintf(stderr, "%s %s built %s %s\n", argv[0], version, __DATE__, __TIME__); bzero(me, sizeof(me)); while ( (ch = getopt(argc, argv, "b:i:vw:")) != -1) { switch (ch) { default: D("bad option %c %s", ch, optarg); usage(); break; case 'b': /* burst */ burst = atoi(optarg); break; case 'i': /* interface */ if (ifa == NULL) ifa = optarg; else if (ifb == NULL) ifb = optarg; else D("%s ignored, already have 2 interfaces", optarg); break; case 'v': verbose++; break; case 'w': wait_link = atoi(optarg); break; } } argc -= optind; argv += optind; if (argc > 1) ifa = argv[1]; if (argc > 2) ifb = argv[2]; if (argc > 3) burst = atoi(argv[3]); if (!ifb) ifb = ifa; if (!ifa) { D("missing interface"); usage(); } if (burst < 1 || burst > 8192) { D("invalid burst %d, set to 1024", burst); burst = 1024; } if (wait_link > 100) { D("invalid wait_link %d, set to 4", wait_link); wait_link = 4; } /* setup netmap interface #1. */ me[0].ifname = ifa; me[1].ifname = ifb; if (!strcmp(ifa, ifb)) { D("same interface, endpoint 0 goes to host"); i = NETMAP_SW_RING; } else { /* two different interfaces. Take all rings on if1 */ i = 0; // all hw rings } if (netmap_open(me, i, 1)) return (1); me[1].mem = me[0].mem; /* copy the pointer, so only one mmap */ if (netmap_open(me+1, 0, 1)) return (1); /* setup poll(2) variables. 
*/ memset(pollfd, 0, sizeof(pollfd)); for (i = 0; i < 2; i++) { pollfd[i].fd = me[i].fd; pollfd[i].events = (POLLIN); } D("Wait %d secs for link to come up...", wait_link); sleep(wait_link); D("Ready to go, %s 0x%x/%d <-> %s 0x%x/%d.", me[0].ifname, me[0].queueid, me[0].nifp->ni_rx_rings, me[1].ifname, me[1].queueid, me[1].nifp->ni_rx_rings); /* main loop */ signal(SIGINT, sigint_h); while (!do_abort) { int n0, n1, ret; pollfd[0].events = pollfd[1].events = 0; pollfd[0].revents = pollfd[1].revents = 0; n0 = pkt_queued(me, 0); n1 = pkt_queued(me + 1, 0); if (n0) pollfd[1].events |= POLLOUT; else pollfd[0].events |= POLLIN; if (n1) pollfd[0].events |= POLLOUT; else pollfd[1].events |= POLLIN; ret = poll(pollfd, 2, 2500); if (ret <= 0 || verbose) D("poll %s [0] ev %x %x rx %d@%d tx %d," " [1] ev %x %x rx %d@%d tx %d", ret <= 0 ? "timeout" : "ok", pollfd[0].events, pollfd[0].revents, pkt_queued(me, 0), me[0].rx->cur, pkt_queued(me, 1), pollfd[1].events, pollfd[1].revents, pkt_queued(me+1, 0), me[1].rx->cur, pkt_queued(me+1, 1) ); if (ret < 0) continue; if (pollfd[0].revents & POLLERR) { D("error on fd0, rxcur %d@%d", me[0].rx->avail, me[0].rx->cur); } if (pollfd[1].revents & POLLERR) { D("error on fd1, rxcur %d@%d", me[1].rx->avail, me[1].rx->cur); } if (pollfd[0].revents & POLLOUT) { move(me + 1, me, burst); // XXX we don't need the ioctl */ // ioctl(me[0].fd, NIOCTXSYNC, NULL); } if (pollfd[1].revents & POLLOUT) { move(me, me + 1, burst); // XXX we don't need the ioctl */ // ioctl(me[1].fd, NIOCTXSYNC, NULL); } } D("exiting"); netmap_close(me + 1); netmap_close(me + 0); return (0); }
/*
 * Open a netmap pktio entry for `netdev`.
 *
 * Probes the netmap module with a dummy nm_open() to read ring counts
 * and buffer size, opens a control socket for ioctls, derives the MTU,
 * queries RSS support, reads interface flags and MAC address, and
 * selects the statistics backend (ethtool or unsupported).
 *
 * Returns 0 on success, -1 on failure (all acquired resources are
 * released through netmap_close()).
 */
static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
		       const char *netdev, odp_pool_t pool)
{
	int i;
	int err;
	int sockfd;
	int mtu;
	uint32_t buf_size;
	pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
	struct nm_desc *desc;
	struct netmap_ring *ring;
	odp_pktin_hash_proto_t hash_proto;
	odp_pktio_stats_t cur_stats;

	if (getenv("ODP_PKTIO_DISABLE_NETMAP"))
		return -1;

	if (pool == ODP_POOL_INVALID)
		return -1;

	/* Init pktio entry */
	memset(pkt_nm, 0, sizeof(*pkt_nm));
	pkt_nm->sockfd = -1;
	pkt_nm->pool = pool;

	/* max frame len taking into account the l2-offset */
	pkt_nm->max_frame_len = ODP_CONFIG_PACKET_BUF_LEN_MAX -
				odp_buffer_pool_headroom(pool) -
				odp_buffer_pool_tailroom(pool);

	snprintf(pktio_entry->s.name, sizeof(pktio_entry->s.name), "%s",
		 netdev);
	snprintf(pkt_nm->nm_name, sizeof(pkt_nm->nm_name), "netmap:%s",
		 netdev);

	/* Dummy open here to check if netmap module is available and to read
	 * capability info. */
	desc = nm_open(pkt_nm->nm_name, NULL, 0, NULL);
	if (desc == NULL) {
		ODP_ERR("nm_open(%s) failed\n", pkt_nm->nm_name);
		goto error;
	}

	if (desc->nifp->ni_rx_rings > NM_MAX_DESC) {
		ODP_ERR("Unable to store all rx rings\n");
		nm_close(desc);
		goto error;
	}
	pkt_nm->num_rx_rings = desc->nifp->ni_rx_rings;
	pkt_nm->capa.max_input_queues = PKTIO_MAX_QUEUES;
	if (desc->nifp->ni_rx_rings < PKTIO_MAX_QUEUES)
		pkt_nm->capa.max_input_queues = desc->nifp->ni_rx_rings;

	if (desc->nifp->ni_tx_rings > NM_MAX_DESC) {
		ODP_ERR("Unable to store all tx rings\n");
		nm_close(desc);
		goto error;
	}
	pkt_nm->num_tx_rings = desc->nifp->ni_tx_rings;
	pkt_nm->capa.max_output_queues = PKTIO_MAX_QUEUES;
	if (desc->nifp->ni_tx_rings < PKTIO_MAX_QUEUES)
		pkt_nm->capa.max_output_queues = desc->nifp->ni_tx_rings;

	ring = NETMAP_RXRING(desc->nifp, desc->cur_rx_ring);
	buf_size = ring->nr_buf_size;
	nm_close(desc);

	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
	if (sockfd == -1) {
		ODP_ERR("Cannot get device control socket\n");
		goto error;
	}
	pkt_nm->sockfd = sockfd;

	/* Use either interface MTU (+ ethernet header length) or netmap
	 * buffer size as MTU, whichever is smaller.
	 *
	 * BUGFIX: the error check must run *before* adding ODPH_ETHHDR_LEN;
	 * the original code added first, so a -1 error return became +13
	 * and slipped past the `mtu < 0` test. */
	mtu = mtu_get_fd(pktio_entry->s.pkt_nm.sockfd, pktio_entry->s.name);
	if (mtu < 0) {
		ODP_ERR("Unable to read interface MTU\n");
		goto error;
	}
	mtu += ODPH_ETHHDR_LEN;
	pkt_nm->mtu = ((uint32_t)mtu < buf_size) ? (uint32_t)mtu : buf_size;

	/* Check if RSS is supported. If not, set 'max_input_queues' to 1. */
	if (rss_conf_get_supported_fd(sockfd, netdev, &hash_proto) == 0) {
		ODP_DBG("RSS not supported\n");
		pkt_nm->capa.max_input_queues = 1;
	}

	err = netmap_do_ioctl(pktio_entry, SIOCGIFFLAGS, 0);
	if (err)
		goto error;
	if ((pkt_nm->if_flags & IFF_UP) == 0)
		ODP_DBG("%s is down\n", pktio_entry->s.name);

	err = mac_addr_get_fd(sockfd, netdev, pkt_nm->if_mac);
	if (err)
		goto error;

	for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
		odp_ticketlock_init(&pkt_nm->rx_desc_ring[i].s.lock);
		odp_ticketlock_init(&pkt_nm->tx_desc_ring[i].s.lock);
	}

	/* netmap uses only ethtool to get statistics counters */
	err = ethtool_stats_get_fd(pktio_entry->s.pkt_nm.sockfd,
				   pktio_entry->s.name, &cur_stats);
	if (err) {
		ODP_ERR(
			"netmap pktio %s does not support statistics counters\n",
			pktio_entry->s.name);
		pktio_entry->s.stats_type = STATS_UNSUPPORTED;
	} else {
		pktio_entry->s.stats_type = STATS_ETHTOOL;
	}

	(void)netmap_stats_reset(pktio_entry);

	return 0;

error:
	netmap_close(pktio_entry);
	return -1;
}
int netmap_open(struct nm_if *nmif) { char ifbuf[IF_NAMESIZE], *p; const char *ifname; int len; struct nmreq nmreq; if (nmif->nm_if_vale) { /* Attach hw interface to VALE switch. */ if (netmap_vale_attach(nmif) != 0) { netmap_close(nmif); return (-1); } /* Attach netmap-fwd to VALE switch. */ p = strchr(nmif->nm_if_name, ':'); len = 0; if (p) len = p - nmif->nm_if_name; memset(ifbuf, 0, sizeof(ifbuf)); snprintf(ifbuf, sizeof(ifbuf) - 1, "%.*s:nmfwd0", len, nmif->nm_if_name); ifname = ifbuf; } else ifname = nmif->nm_if_name; nmif->nm_if_fd = open("/dev/netmap", O_RDWR); if (nmif->nm_if_fd == -1) { perror("open"); return (-1); } memset(&nmreq, 0, sizeof(nmreq)); strlcpy(nmreq.nr_name, ifname, sizeof(nmreq.nr_name)); nmreq.nr_version = NETMAP_API; if (nohostring || nmif->nm_if_vale) nmreq.nr_flags = NR_REG_ALL_NIC; else nmreq.nr_flags = NR_REG_NIC_SW; if (nmif->nm_if_vale) nmreq.nr_tx_rings = nmreq.nr_rx_rings = 4; if (ioctl(nmif->nm_if_fd, NIOCREGIF, &nmreq) == -1) { perror("ioctl"); netmap_close(nmif); return (-1); } DPRINTF("fd: %d\n", nmif->nm_if_fd); DPRINTF("name: %s\n", nmreq.nr_name); DPRINTF("version: %d\n", nmreq.nr_version); DPRINTF("offset: %d\n", nmreq.nr_offset); DPRINTF("memsize: %d\n", nmreq.nr_memsize); DPRINTF("tx_slots: %d\n", nmreq.nr_tx_slots); DPRINTF("rx_slots: %d\n", nmreq.nr_rx_slots); DPRINTF("tx_rings: %d\n", nmreq.nr_tx_rings); DPRINTF("rx_rings: %d\n", nmreq.nr_rx_rings); DPRINTF("ringid: %#x\n", nmreq.nr_ringid); DPRINTF("flags: %#x\n", nmreq.nr_flags); nmif->nm_if_memsize = nmreq.nr_memsize; nmif->nm_if_mem = mmap(NULL, nmif->nm_if_memsize, PROT_READ | PROT_WRITE, MAP_SHARED, nmif->nm_if_fd, 0); if (nmif->nm_if_mem == MAP_FAILED) { perror("mmap"); netmap_close(nmif); return (-1); } nmif->nm_if_ifp = NETMAP_IF(nmif->nm_if_mem, nmreq.nr_offset); nmif->nm_if_ev_read = event_new(ev_get_base(), nmif->nm_if_fd, EV_READ | EV_PERSIST, netmap_read, nmif); event_add(nmif->nm_if_ev_read, NULL); return (0); }