void ldc_send_ack(struct ldc_conn *lc) { struct ldc_pkt *lp; uint64_t tx_head, tx_tail, tx_state; int err; mutex_enter(&lc->lc_txq->lq_mtx); err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); if (err != H_EOK || tx_state != LDC_CHANNEL_UP) { mutex_exit(&lc->lc_txq->lq_mtx); return; } lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_txq->lq_va + tx_tail); bzero(lp, sizeof(struct ldc_pkt)); lp->type = LDC_CTRL; lp->stype = LDC_ACK; lp->ctrl = LDC_VERS; lp->major = 1; lp->minor = 0; tx_tail += sizeof(*lp); tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(*lp)) - 1); err = hv_ldc_tx_set_qtail(lc->lc_id, tx_tail); if (err != H_EOK) { printf("%s: hv_ldc_tx_set_qtail: %d\n", __func__, err); mutex_exit(&lc->lc_txq->lq_mtx); return; } lc->lc_state = LDC_RCV_VERS; mutex_exit(&lc->lc_txq->lq_mtx); }
void ldc_send_rdx(struct ldc_conn *lc) { struct ldc_pkt *lp; uint64_t tx_head, tx_tail, tx_state; int err; mutex_enter(&lc->lc_txq->lq_mtx); err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); if (err != H_EOK || tx_state != LDC_CHANNEL_UP) { mutex_exit(&lc->lc_txq->lq_mtx); return; } lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_txq->lq_va + tx_tail); bzero(lp, sizeof(struct ldc_pkt)); lp->type = LDC_CTRL; lp->stype = LDC_INFO; lp->ctrl = LDC_RDX; lp->env = LDC_MODE_UNRELIABLE; lp->seqid = lc->lc_tx_seqid++; tx_tail += sizeof(*lp); tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(*lp)) - 1); err = hv_ldc_tx_set_qtail(lc->lc_id, tx_tail); if (err != H_EOK) { printf("%s: hv_ldc_tx_set_qtail: %d\n", __func__, err); mutex_exit(&lc->lc_txq->lq_mtx); return; } lc->lc_state = LDC_SND_RDX; mutex_exit(&lc->lc_txq->lq_mtx); }
int vdsk_tx_intr(void *arg) { struct vdsk_softc *sc = arg; struct ldc_conn *lc = &sc->sc_lc; uint64_t tx_head, tx_tail, tx_state; hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); if (tx_state != lc->lc_tx_state) { switch (tx_state) { case LDC_CHANNEL_DOWN: DPRINTF(("Tx link down\n")); break; case LDC_CHANNEL_UP: DPRINTF(("Tx link up\n")); break; case LDC_CHANNEL_RESET: DPRINTF(("Tx link reset\n")); break; } lc->lc_tx_state = tx_state; } return (1); }
/*
 * Send a single-packet VIO message (at most LDC_PKT_PAYLOAD bytes)
 * on the vdsk LDC channel as one START+STOP data fragment.
 *
 * NOTE(review): unlike ldc_send_unreliable() this path does not take
 * lc_txq->lq_mtx — presumably all vdsk sends are serialized by the
 * caller; confirm before adding concurrent callers.
 */
void
vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len)
{
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_pkt *lp;
	uint64_t tx_head, tx_tail, tx_state;
	int err;

	err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	/*
	 * BUG FIX: also bail out when the channel is not up; every
	 * other sender in this file checks LDC_CHANNEL_UP before
	 * writing into the transmit queue.
	 */
	if (err != H_EOK || tx_state != LDC_CHANNEL_UP)
		return;

	lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_txq->lq_va + tx_tail);
	bzero(lp, sizeof(struct ldc_pkt));
	lp->type = LDC_DATA;
	lp->stype = LDC_INFO;
	KASSERT((len & ~LDC_LEN_MASK) == 0);
	/* The whole message fits in one packet: both fragment flags set. */
	lp->env = len | LDC_FRAG_STOP | LDC_FRAG_START;
	lp->seqid = lc->lc_tx_seqid++;
	bcopy(msg, &lp->major, len);

	/* Advance the tail, wrapping at the end of the queue. */
	tx_tail += sizeof(*lp);
	tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(*lp)) - 1);
	err = hv_ldc_tx_set_qtail(lc->lc_id, tx_tail);
	if (err != H_EOK)
		printf("%s: hv_ldc_tx_set_qtail: %d\n", __func__, err);
}
int vdsp_tx_intr(void *arg) { struct vdsp_softc *sc = arg; struct ldc_conn *lc = &sc->sc_lc; uint64_t tx_head, tx_tail, tx_state; int err; err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); if (err != H_EOK) { printf("hv_ldc_rx_get_state %d\n", err); return (0); } if (tx_state != lc->lc_tx_state) { switch (tx_state) { case LDC_CHANNEL_DOWN: DPRINTF(("Tx link down\n")); break; case LDC_CHANNEL_UP: DPRINTF(("Tx link up\n")); break; case LDC_CHANNEL_RESET: DPRINTF(("Tx link reset\n")); break; } lc->lc_tx_state = tx_state; } wakeup(lc->lc_txq); return (1); }
int vldcp_tx_intr(void *arg) { struct vldcp_softc *sc = arg; struct ldc_conn *lc = &sc->sc_lc; uint64_t tx_head, tx_tail, tx_state; int err; err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); if (err != H_EOK) { printf("%s: hv_ldc_tx_get_state %d\n", __func__, err); return (0); } if (tx_state != lc->lc_tx_state) { switch (tx_state) { case LDC_CHANNEL_DOWN: DPRINTF(("Tx link down\n")); break; case LDC_CHANNEL_UP: DPRINTF(("Tx link up\n")); break; case LDC_CHANNEL_RESET: DPRINTF(("Tx link reset\n")); break; } lc->lc_tx_state = tx_state; } cbus_intr_setenabled(sc->sc_tx_sysino, INTR_DISABLED); selwakeup(&sc->sc_wsel); wakeup(lc->lc_txq); return (1); }
/*
 * Transmit an arbitrary-length message over an unreliable LDC
 * channel, fragmenting it into LDC_PKT_PAYLOAD-sized data packets.
 * The first fragment carries LDC_FRAG_START, the last LDC_FRAG_STOP.
 *
 * Returns 0 on success, EIO on channel/hypervisor failure, or
 * EWOULDBLOCK if the transmit queue cannot hold the whole message.
 */
int
ldc_send_unreliable(struct ldc_conn *lc, void *msg, size_t len)
{
	struct ldc_pkt *pkt;
	uint64_t head, tail, state;
	uint64_t avail;
	uint8_t *src = msg;
	size_t nbytes;
	int err;

	mutex_enter(&lc->lc_txq->lq_mtx);

	err = hv_ldc_tx_get_state(lc->lc_id, &head, &tail, &state);
	if (err != H_EOK || state != LDC_CHANNEL_UP) {
		mutex_exit(&lc->lc_txq->lq_mtx);
		return (EIO);
	}

	/* Free descriptor count; one slot always stays empty. */
	avail = (head - tail) / sizeof(*pkt) + lc->lc_txq->lq_nentries - 1;
	avail %= lc->lc_txq->lq_nentries;
	if (len > avail * LDC_PKT_PAYLOAD) {
		mutex_exit(&lc->lc_txq->lq_mtx);
		return (EWOULDBLOCK);
	}

	while (len > 0) {
		nbytes = min(len, LDC_PKT_PAYLOAD);

		pkt = (struct ldc_pkt *)(uintptr_t)(lc->lc_txq->lq_va + tail);
		bzero(pkt, sizeof(struct ldc_pkt));
		pkt->type = LDC_DATA;
		pkt->stype = LDC_INFO;
		pkt->env = nbytes;
		if (src == (uint8_t *)msg)
			pkt->env |= LDC_FRAG_START;
		if (len == nbytes)
			pkt->env |= LDC_FRAG_STOP;
		pkt->seqid = lc->lc_tx_seqid++;
		bcopy(src, &pkt->major, nbytes);

		/* Publish the fragment before building the next one. */
		tail = (tail + sizeof(*pkt)) &
		    ((lc->lc_txq->lq_nentries * sizeof(*pkt)) - 1);
		err = hv_ldc_tx_set_qtail(lc->lc_id, tail);
		if (err != H_EOK) {
			printf("%s: hv_ldc_tx_set_qtail: %d\n", __func__, err);
			mutex_exit(&lc->lc_txq->lq_mtx);
			return (EIO);
		}

		src += nbytes;
		len -= nbytes;
	}

	mutex_exit(&lc->lc_txq->lq_mtx);
	return (0);
}
/*
 * poll(2) entry point for the vldcp(4) character device.
 *
 * Readable: the receive queue holds at least one packet.
 * Writable: the transmit queue has room for at least one packet.
 * If neither is ready, enable the relevant channel interrupt and
 * record the process for selwakeup().
 */
int
vldcppoll(dev_t dev, int events, struct proc *p)
{
	struct vldcp_softc *sc;
	struct ldc_conn *lc;
	uint64_t head, tail, next_tail, state;
	int revents = 0;
	int s, err;

	sc = vldcp_lookup(dev);
	if (sc == NULL)
		return (ENXIO);
	lc = &sc->sc_lc;

	/*
	 * NOTE(review): vldcpwrite() pairs vldcp_lookup() with
	 * device_unref() on every exit path but this function never
	 * unrefs — looks like a reference leak; confirm against
	 * vldcp_lookup()'s reference semantics.
	 */
	s = spltty();
	if (events & (POLLIN | POLLRDNORM)) {
		err = hv_ldc_rx_get_state(lc->lc_id, &head, &tail, &state);
		if (err == 0 && state == LDC_CHANNEL_UP && head != tail)
			revents |= events & (POLLIN | POLLRDNORM);
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		err = hv_ldc_tx_get_state(lc->lc_id, &head, &tail, &state);
		/*
		 * BUG FIX: the old test (head != tail) reported the
		 * device writable only when the tx queue was already
		 * non-empty and never when it was empty.  A queue is
		 * writable when advancing the tail by one packet does
		 * not collide with the head — the same full-queue test
		 * vldcpwrite() uses before sleeping.
		 */
		next_tail = (tail + sizeof(struct ldc_pkt)) &
		    ((lc->lc_txq->lq_nentries * sizeof(struct ldc_pkt)) - 1);
		if (err == 0 && state == LDC_CHANNEL_UP && next_tail != head)
			revents |= events & (POLLOUT | POLLWRNORM);
	}
	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			cbus_intr_setenabled(sc->sc_rx_sysino, INTR_ENABLED);
			selrecord(p, &sc->sc_rsel);
		}
		if (events & (POLLOUT | POLLWRNORM)) {
			cbus_intr_setenabled(sc->sc_tx_sysino, INTR_ENABLED);
			selrecord(p, &sc->sc_wsel);
		}
	}
	splx(s);
	return revents;
}
/*
 * Transmit start routine for vnet(4).  Drains the interface send
 * queue into the descriptor ring shared with the peer and, when new
 * descriptors were produced, notifies the peer with a DRING_DATA
 * message.
 */
void
vnet_start(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct mbuf *m;
	paddr_t pa;
	caddr_t buf;
	uint64_t tx_head, tx_tail, tx_state;
	u_int start, prod, count;
	int err;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * We cannot transmit packets until a VIO connection has been
	 * established.
	 */
	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
		return;

	/*
	 * Make sure there is room in the LDC transmit queue to send a
	 * DRING_DATA message.
	 */
	err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (err != H_EOK)
		return;
	tx_tail += sizeof(struct ldc_pkt);
	tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(struct ldc_pkt)) - 1);
	if (tx_tail == tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	/* Descriptor (in-band) transfer mode has its own start routine. */
	if (sc->sc_xfer_mode == VIO_DESC_MODE) {
		vnet_start_desc(ifp);
		return;
	}

	start = prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
	while (sc->sc_vd->vd_desc[prod].hdr.dstate == VIO_DESC_FREE) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Stop if the ring or the map table is (nearly) full. */
		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		/* Copy the packet out before committing the dequeue. */
		m_copydata(m, 0, m->m_pkthdr.len, buf + VNET_ETHER_ALIGN);
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
		/* Find a free map-table slot to export the buffer's page. */
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_int(&map->lm_count);

		/* Pad short frames to 60 bytes (Ethernet minimum sans FCS). */
		sc->sc_vd->vd_desc[prod].nbytes = max(m->m_pkthdr.len, 60);
		sc->sc_vd->vd_desc[prod].ncookies = 1;
		sc->sc_vd->vd_desc[prod].cookie[0].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[prod].cookie[0].size = 2048;
		/* Descriptor contents must be visible before the state flip. */
		membar_producer();
		sc->sc_vd->vd_desc[prod].hdr.dstate = VIO_DESC_READY;

		/* Remember the map slot and buffer for completion time. */
		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		sc->sc_tx_prod++;
		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}

	membar_producer();

	/* Kick the peer only if it is not already processing the ring. */
	if (start != prod && sc->sc_peer_state != VIO_DP_ACTIVE) {
		vnet_send_dring_data(sc, start);
		ifp->if_timer = 5;
	}
}
/*
 * write(2) entry point for the vldcp(4) character device.  Copies
 * exactly one 64-byte LDC packet from userland into the transmit
 * queue, sleeping (interruptibly) while the queue is full.
 *
 * Returns 0 on success, EINVAL for a wrong-sized write, EIO on
 * channel/hypervisor failure, or the tsleep()/uiomove() error.
 */
int
vldcpwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct vldcp_softc *sc;
	struct ldc_conn *lc;
	uint64_t tx_head, tx_tail, tx_state;
	uint64_t next_tx_tail;
	int err, ret;
	int s;

	sc = vldcp_lookup(dev);
	if (sc == NULL)
		return (ENXIO);
	lc = &sc->sc_lc;

	/* Writes must be exactly one LDC packet. */
	if (uio->uio_resid != 64) {
		device_unref(&sc->sc_dv);
		return (EINVAL);
	}

	s = spltty();
retry:
	err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (err != H_EOK) {
		splx(s);
		printf("%s: hv_ldc_tx_get_state %d\n", __func__, err);
		device_unref(&sc->sc_dv);
		return (EIO);
	}

	if (tx_state != LDC_CHANNEL_UP) {
		splx(s);
		device_unref(&sc->sc_dv);
		return (EIO);
	}

	DPRINTF(("tx head %llx, tx tail %llx\n", tx_head, tx_tail));

	next_tx_tail = tx_tail + 64;
	next_tx_tail &= ((lc->lc_txq->lq_nentries * 64) - 1);

	if (tx_head == next_tx_tail) {
		/* Queue full: arm the tx interrupt and wait for space. */
		cbus_intr_setenabled(sc->sc_tx_sysino, INTR_ENABLED);
		ret = tsleep(lc->lc_txq, PWAIT | PCATCH, "hvwr", 0);
		if (ret) {
			splx(s);
			device_unref(&sc->sc_dv);
			return (ret);
		}
		goto retry;
	}
	splx(s);

	ret = uiomove(lc->lc_txq->lq_va + tx_tail, 64, uio);
	/*
	 * BUG FIX: the tail was previously advanced even when
	 * uiomove() failed, handing the hypervisor a packet of stale
	 * queue memory.  Only publish the slot after the copy-in
	 * succeeded.
	 */
	if (ret) {
		device_unref(&sc->sc_dv);
		return (ret);
	}

	err = hv_ldc_tx_set_qtail(lc->lc_id, next_tx_tail);
	if (err != H_EOK) {
		printf("%s: hv_ldc_tx_set_qtail: %d\n", __func__, err);
		device_unref(&sc->sc_dv);
		return (EIO);
	}

	device_unref(&sc->sc_dv);
	return (0);
}