void
vnet_stop(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	cbus_intr_setenabled(sc->sc_bustag, sc->sc_tx_ino, INTR_DISABLED);
	cbus_intr_setenabled(sc->sc_bustag, sc->sc_rx_ino, INTR_DISABLED);

	intr_barrier(sc->sc_tx_ih);
	intr_barrier(sc->sc_rx_ih);

	hv_ldc_tx_qconf(lc->lc_id, 0, 0);
	hv_ldc_rx_qconf(lc->lc_id, 0, 0);
	lc->lc_tx_seqid = 0;
	lc->lc_state = 0;
	lc->lc_tx_state = lc->lc_rx_state = LDC_CHANNEL_DOWN;
	vnet_ldc_reset(lc);

	free(sc->sc_vsd, M_DEVBUF, VNET_NUM_SOFT_DESC * sizeof(*sc->sc_vsd));

	vnet_dring_free(sc->sc_dmatag, sc->sc_vd);

	hv_ldc_set_map_table(lc->lc_id, 0, 0);
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
}
int
vldcpopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct vldcp_softc *sc;
	struct ldc_conn *lc;
	uint64_t rx_head, rx_tail, rx_state;
	int err;

	sc = vldcp_lookup(dev);
	if (sc == NULL)
		return (ENXIO);
	lc = &sc->sc_lc;

	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
	if (err != H_EOK)
		printf("%s: hv_ldc_tx_qconf %d\n", __func__, err);

	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_qconf %d\n", __func__, err);

	/* Clear a pending channel reset. */
	err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_get_state %d\n", __func__, err);

	device_unref(&sc->sc_dv);
	return (0);
}
void
vnet_init(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;
	int err;

	sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
	if (sc->sc_lm == NULL)
		return;

	err = hv_ldc_set_map_table(lc->lc_id,
	    sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		return;
	}

	sc->sc_vd = vnet_dring_alloc(sc->sc_dmatag, VNET_NUM_SOFT_DESC);
	if (sc->sc_vd == NULL)
		return;
	sc->sc_vsd = malloc(VNET_NUM_SOFT_DESC * sizeof(*sc->sc_vsd), M_DEVBUF,
	    M_NOWAIT|M_ZERO);
	if (sc->sc_vsd == NULL)
		return;

	sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;

	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);

	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sc->sc_bustag, sc->sc_tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(sc->sc_bustag, sc->sc_rx_ino, INTR_ENABLED);

	ldc_send_vers(lc);

	ifp->if_flags |= IFF_RUNNING;
}
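/*
 * The queue-configuration step in vnet_init() above is repeated almost
 * verbatim by vdsk_attach() and vldcpopen() elsewhere in this section.
 * The following is a minimal sketch of that shared bring-up step, not
 * driver code: the helper name ldc_bringup_sketch and its parameter list
 * are hypothetical, and it assumes the three-argument
 * cbus_intr_setenabled() form used by vnet above.
 */
int
ldc_bringup_sketch(struct ldc_conn *lc, bus_space_tag_t bustag,
    uint64_t tx_ino, uint64_t rx_ino)
{
	int err;

	/* Point the hypervisor at the DMA-mapped tx and rx queues. */
	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
	if (err != H_EOK)
		return (err);
	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
	if (err != H_EOK)
		return (err);

	/* Only then unmask the channel interrupts and start the handshake. */
	cbus_intr_setenabled(bustag, tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(bustag, rx_ino, INTR_ENABLED);
	ldc_send_vers(lc);

	return (0);
}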
int
vldcpclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct vldcp_softc *sc;

	sc = vldcp_lookup(dev);
	if (sc == NULL)
		return (ENXIO);

	cbus_intr_setenabled(sc->sc_tx_sysino, INTR_DISABLED);
	cbus_intr_setenabled(sc->sc_rx_sysino, INTR_DISABLED);

	hv_ldc_tx_qconf(sc->sc_lc.lc_id, 0, 0);
	hv_ldc_rx_qconf(sc->sc_lc.lc_id, 0, 0);

	device_unref(&sc->sc_dv);
	return (0);
}
void
ldc_reset(struct ldc_conn *lc)
{
	int err;
	vaddr_t va;
	paddr_t pa;

	DPRINTF(("Resetting connection\n"));

	mutex_enter(&lc->lc_txq->lq_mtx);

#if OPENBSD_BUSDMA
	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
#else
	va = lc->lc_txq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("%s: hv_ldc_tx_qconf %d\n", __func__, err);

#if OPENBSD_BUSDMA
	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
#else
	va = lc->lc_rxq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_qconf %d\n", __func__, err);

	lc->lc_tx_seqid = 0;
	lc->lc_state = 0;
	lc->lc_tx_state = lc->lc_rx_state = LDC_CHANNEL_DOWN;
	mutex_exit(&lc->lc_txq->lq_mtx);

	lc->lc_reset(lc);
}
void
vnet_stop(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	cbus_intr_setenabled(sc->sc_bustag, sc->sc_tx_ino, INTR_DISABLED);
	cbus_intr_setenabled(sc->sc_bustag, sc->sc_rx_ino, INTR_DISABLED);

	hv_ldc_tx_qconf(lc->lc_id, 0, 0);
	hv_ldc_rx_qconf(lc->lc_id, 0, 0);
	lc->lc_tx_seqid = 0;
	lc->lc_state = 0;
	lc->lc_tx_state = lc->lc_rx_state = LDC_CHANNEL_DOWN;
	vnet_ldc_reset(lc);

	vnet_dring_free(sc->sc_dmatag, sc->sc_vd);

	hv_ldc_set_map_table(lc->lc_id, 0, 0);
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
}
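/*
 * Both versions of vnet_stop() above, like vldcpclose(), tear the channel
 * down in the same order: mask interrupts, unconfigure the hypervisor
 * queues, then reset the software LDC state.  A minimal sketch of that
 * common teardown; the helper name ldc_teardown_sketch is hypothetical,
 * and the three-argument cbus_intr_setenabled() form is assumed.
 */
void
ldc_teardown_sketch(struct ldc_conn *lc, bus_space_tag_t bustag,
    uint64_t tx_ino, uint64_t rx_ino)
{
	/* Stop interrupt delivery before touching the queues. */
	cbus_intr_setenabled(bustag, tx_ino, INTR_DISABLED);
	cbus_intr_setenabled(bustag, rx_ino, INTR_DISABLED);

	/* A base address of 0 with 0 entries unconfigures a queue. */
	hv_ldc_tx_qconf(lc->lc_id, 0, 0);
	hv_ldc_rx_qconf(lc->lc_id, 0, 0);

	/* Mark the channel down so the next bring-up renegotiates. */
	lc->lc_tx_seqid = 0;
	lc->lc_state = 0;
	lc->lc_tx_state = lc->lc_rx_state = LDC_CHANNEL_DOWN;
}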
void
vdsk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vdsk_softc *sc = (struct vdsk_softc *)self;
	struct cbus_attach_args *ca = aux;
	struct scsibus_attach_args saa;
	struct ldc_conn *lc;
	uint64_t sysino[2];
	int err, s;
	int timeout;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;

	if (cbus_intr_map(ca->ca_node, ca->ca_tx_ino, &sysino[0]) ||
	    cbus_intr_map(ca->ca_node, ca->ca_rx_ino, &sysino[1])) {
		printf(": can't map interrupt\n");
		return;
	}
	printf(": ivec 0x%lx, 0x%lx", sysino[0], sysino[1]);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sysino[0], IPL_BIO,
	    0, vdsk_tx_intr, sc, sc->sc_dv.dv_xname);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sysino[1], IPL_BIO,
	    0, vdsk_rx_intr, sc, sc->sc_dv.dv_xname);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vdsk_ldc_reset;
	lc->lc_start = vdsk_ldc_start;
	lc->lc_rx_data = vdsk_rx_data;

	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}

	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

	sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
	if (sc->sc_lm == NULL) {
		printf(", can't allocate LDC mapping table\n");
		goto free_rxqueue;
	}

	err = hv_ldc_set_map_table(lc->lc_id,
	    sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		goto free_map;
	}

	sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32);
	if (sc->sc_vd == NULL) {
		printf(", can't allocate dring\n");
		goto free_map;
	}
	sc->sc_vsd = malloc(32 * sizeof(*sc->sc_vsd), M_DEVBUF, M_NOWAIT);
	if (sc->sc_vsd == NULL) {
		printf(", can't allocate software ring\n");
		goto free_dring;
	}

	sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;

	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);

	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sysino[0], INTR_ENABLED);
	cbus_intr_setenabled(sysino[1], INTR_ENABLED);

	ldc_send_vers(lc);

	printf("\n");

	/*
	 * Interrupts aren't enabled during autoconf, so poll for VIO
	 * peer-to-peer handshake completion.
	 */
	s = splbio();
	timeout = 1000;
	do {
		if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED)
			break;

		delay(1000);
	} while(--timeout > 0);
	splx(s);

	if (sc->sc_vio_state != VIO_ESTABLISHED)
		return;

	scsi_iopool_init(&sc->sc_iopool, sc, vdsk_io_get, vdsk_io_put);

	sc->sc_switch.scsi_cmd = vdsk_scsi_cmd;
	sc->sc_switch.scsi_minphys = scsi_minphys;
	sc->sc_switch.dev_probe = vdsk_dev_probe;
	sc->sc_switch.dev_free = vdsk_dev_free;

	sc->sc_link.adapter = &sc->sc_switch;
	sc->sc_link.adapter_softc = self;
	sc->sc_link.adapter_buswidth = 2;
	sc->sc_link.luns = 1; /* XXX slices should be presented as luns? */
	sc->sc_link.adapter_target = 2;
	sc->sc_link.openings = sc->sc_vd->vd_nentries - 1;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;
	config_found(self, &saa, scsiprint);

	return;

free_dring:
	vdsk_dring_free(sc->sc_dmatag, sc->sc_vd);
free_map:
	hv_ldc_set_map_table(lc->lc_id, 0, 0);
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
free_rxqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_rxq);
free_txqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
}
void
vnet_attach(struct device *parent, struct device *self, void *aux)
{
	struct vnet_softc *sc = (struct vnet_softc *)self;
	struct cbus_attach_args *ca = aux;
	struct ldc_conn *lc;
	struct ifnet *ifp;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;
	sc->sc_tx_ino = ca->ca_tx_ino;
	sc->sc_rx_ino = ca->ca_rx_ino;

	printf(": ivec 0x%llx, 0x%llx", sc->sc_tx_ino, sc->sc_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_tx_ino,
	    IPL_NET, BUS_INTR_ESTABLISH_MPSAFE, vnet_tx_intr, sc,
	    sc->sc_dv.dv_xname);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_rx_ino,
	    IPL_NET, BUS_INTR_ESTABLISH_MPSAFE, vnet_rx_intr, sc,
	    sc->sc_dv.dv_xname);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vnet_ldc_reset;
	lc->lc_start = vnet_ldc_start;
	lc->lc_rx_data = vio_rx_data;

	timeout_set(&sc->sc_handshake_to, vnet_handshake, sc);
	sc->sc_peer_state = VIO_DP_STOPPED;

	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VNET_TX_ENTRIES);
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}

	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VNET_RX_ENTRIES);
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

	if (OF_getprop(ca->ca_node, "local-mac-address",
	    sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) > 0)
		printf(", address %s", ether_sprintf(sc->sc_ac.ac_enaddr));

	/*
	 * Each interface gets its own pool.
	 */
	pool_init(&sc->sc_pool, 2048, 0, 0, 0, sc->sc_dv.dv_xname, NULL);
	pool_setipl(&sc->sc_pool, IPL_NET);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_link_state = LINK_STATE_DOWN;
	ifp->if_ioctl = vnet_ioctl;
	ifp->if_start = vnet_start;
	ifp->if_watchdog = vnet_watchdog;
	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 31); /* XXX */
	IFQ_SET_READY(&ifp->if_snd);

	ifmedia_init(&sc->sc_media, 0, vnet_media_change, vnet_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	printf("\n");
	return;

free_txqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
}
void
vdsp_attach(struct device *parent, struct device *self, void *aux)
{
	struct vdsp_softc *sc = (struct vdsp_softc *)self;
	struct cbus_attach_args *ca = aux;
	struct ldc_conn *lc;

	sc->sc_idx = ca->ca_idx;
	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;
	sc->sc_tx_ino = ca->ca_tx_ino;
	sc->sc_rx_ino = ca->ca_rx_ino;

	printf(": ivec 0x%llx, 0x%llx", sc->sc_tx_ino, sc->sc_rx_ino);

	mtx_init(&sc->sc_desc_mtx, IPL_BIO);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_tx_ino,
	    IPL_BIO, BUS_INTR_ESTABLISH_MPSAFE, vdsp_tx_intr, sc,
	    sc->sc_dv.dv_xname);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_rx_ino,
	    IPL_BIO, BUS_INTR_ESTABLISH_MPSAFE, vdsp_rx_intr, sc,
	    sc->sc_dv.dv_xname);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vdsp_ldc_reset;
	lc->lc_start = vdsp_ldc_start;
	lc->lc_rx_data = vdsp_rx_data;

	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}

	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

	task_set(&sc->sc_open_task, vdsp_open, sc);
	task_set(&sc->sc_alloc_task, vdsp_alloc, sc);
	task_set(&sc->sc_close_task, vdsp_close, sc);
	task_set(&sc->sc_read_task, vdsp_read, sc);

	printf("\n");
	return;

#if 0
free_rxqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_rxq);
#endif
free_txqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
}
void
vldcp_attach(struct device *parent, struct device *self, void *aux)
{
	struct vldcp_softc *sc = (struct vldcp_softc *)self;
	struct cbus_attach_args *ca = aux;
	struct vldc_svc *svc;
	struct ldc_conn *lc;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;

	if (cbus_intr_map(ca->ca_node, ca->ca_tx_ino, &sc->sc_tx_sysino) ||
	    cbus_intr_map(ca->ca_node, ca->ca_rx_ino, &sc->sc_rx_sysino)) {
		printf(": can't map interrupt\n");
		return;
	}
	printf(": ivec 0x%llx, 0x%llx", sc->sc_tx_sysino, sc->sc_rx_sysino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_tx_sysino,
	    IPL_TTY, 0, vldcp_tx_intr, sc, sc->sc_dv.dv_xname);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_rx_sysino,
	    IPL_TTY, 0, vldcp_rx_intr, sc, sc->sc_dv.dv_xname);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;

	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VLDCP_TX_ENTRIES);
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}

	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VLDCP_RX_ENTRIES);
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

	for (svc = vldc_svc; svc->vs_name != NULL; svc++) {
		if (strcmp(ca->ca_name, svc->vs_name) == 0) {
			svc->vs_sc = sc;
			break;
		}
	}

	if (strncmp(ca->ca_name, "ldom-", 5) == 0 &&
	    strcmp(ca->ca_name, "ldom-primary") != 0) {
		int minor = VLDC_LDOM_OFFSET + vldc_num_ldoms++;
		if (minor < nitems(vldc_svc))
			vldc_svc[minor].vs_sc = sc;
	}

	printf(" channel \"%s\"\n", ca->ca_name);
	return;

#if 0
free_rxqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_rxq);
#endif
free_txqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
}
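/*
 * vldcpopen() and vldcpclose() above obtain their softc via vldcp_lookup(),
 * which is not shown in this excerpt.  The following is only a sketch of
 * what such a lookup could look like, assuming the device minor number
 * indexes the vldc_svc table populated in vldcp_attach(); the body is an
 * assumption, not the driver's actual implementation.
 */
struct vldcp_softc *
vldcp_lookup(dev_t dev)
{
	struct vldcp_softc *sc = NULL;

	if (minor(dev) < nitems(vldc_svc))
		sc = vldc_svc[minor(dev)].vs_sc;

	/* Take a device reference; callers drop it with device_unref(). */
	if (sc != NULL)
		device_ref(&sc->sc_dv);

	return (sc);
}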