int
vmbus_chan_open(struct vmbus_channel *chan, int txbr_size, int rxbr_size,
    const void *udata, int udlen, vmbus_chan_callback_t cb, void *cbarg)
{
	struct vmbus_softc *sc = chan->ch_vmbus;
	const struct vmbus_chanmsg_chopen_resp *resp;
	const struct vmbus_message *msg;
	struct vmbus_chanmsg_chopen *req;
	struct vmbus_msghc *mh;
	uint32_t status;
	int error;
	uint8_t *br;

	if (udlen > VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) {
		device_printf(sc->vmbus_dev,
		    "invalid udata len %d for chan%u\n", udlen, chan->ch_id);
		return EINVAL;
	}
	KASSERT((txbr_size & PAGE_MASK) == 0,
	    ("send bufring size is not a multiple of PAGE_SIZE"));
	KASSERT((rxbr_size & PAGE_MASK) == 0,
	    ("recv bufring size is not a multiple of PAGE_SIZE"));

	if (atomic_testandset_int(&chan->ch_stflags,
	    VMBUS_CHAN_ST_OPENED_SHIFT))
		panic("double-open chan%u", chan->ch_id);

	chan->ch_cb = cb;
	chan->ch_cbarg = cbarg;

	vmbus_chan_update_evtflagcnt(sc, chan);

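	/* Run this channel's task on the event taskqueue of its target CPU. */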
	chan->ch_tq = VMBUS_PCPU_GET(chan->ch_vmbus, event_tq, chan->ch_cpuid);
	if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
		TASK_INIT(&chan->ch_task, 0, vmbus_chan_task, chan);
	else
		TASK_INIT(&chan->ch_task, 0, vmbus_chan_task_nobatch, chan);

	/*
	 * Allocate the TX+RX bufrings.
	 * XXX should use ch_dev dtag
	 */
	br = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
	    PAGE_SIZE, 0, txbr_size + rxbr_size, &chan->ch_bufring_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (br == NULL) {
		device_printf(sc->vmbus_dev, "bufring allocation failed\n");
		error = ENOMEM;
		goto failed;
	}
	chan->ch_bufring = br;

	/* TX bufring comes first */
	vmbus_txbr_setup(&chan->ch_txbr, br, txbr_size);
	/* RX bufring immediately follows TX bufring */
	vmbus_rxbr_setup(&chan->ch_rxbr, br + txbr_size, rxbr_size);

	/* Create sysctl tree for this channel */
	vmbus_chan_sysctl_create(chan);

	/*
	 * Connect the bufrings, both RX and TX, to this channel.
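	 * A GPADL (Guest Physical Address Descriptor List) hands the host a
	 * handle it can use to access these guest pages.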
	 */
	error = vmbus_chan_gpadl_connect(chan, chan->ch_bufring_dma.hv_paddr,
	    txbr_size + rxbr_size, &chan->ch_bufring_gpadl);
	if (error) {
		device_printf(sc->vmbus_dev,
		    "failed to connect bufring GPADL to chan%u\n", chan->ch_id);
		goto failed;
	}

	/*
	 * Open channel w/ the bufring GPADL on the target CPU.
	 */
	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for chopen(chan%u)\n",
		    chan->ch_id);
		error = ENXIO;
		goto failed;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHOPEN;
	req->chm_chanid = chan->ch_id;
	req->chm_openid = chan->ch_id;
	req->chm_gpadl = chan->ch_bufring_gpadl;
	req->chm_vcpuid = chan->ch_vcpuid;
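	/*
	 * Only the TX bufring page count is sent; the RX bufring occupies
	 * the remainder of the GPADL range.
	 */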
	req->chm_txbr_pgcnt = txbr_size >> PAGE_SHIFT;
	if (udlen > 0)
		memcpy(req->chm_udata, udata, udlen);

	error = vmbus_msghc_exec(sc, mh);
	if (error) {
		device_printf(sc->vmbus_dev,
		    "chopen(chan%u) msg hypercall exec failed: %d\n",
		    chan->ch_id, error);
		vmbus_msghc_put(sc, mh);
		goto failed;
	}

	msg = vmbus_msghc_wait_result(sc, mh);
	resp = (const struct vmbus_chanmsg_chopen_resp *)msg->msg_data;
	status = resp->chm_status;

	vmbus_msghc_put(sc, mh);

	if (status == 0) {
		if (bootverbose) {
			device_printf(sc->vmbus_dev, "chan%u opened\n",
			    chan->ch_id);
		}
		return 0;
	}

	device_printf(sc->vmbus_dev, "failed to open chan%u\n", chan->ch_id);
	error = ENXIO;

failed:
	if (chan->ch_bufring_gpadl) {
		vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
		chan->ch_bufring_gpadl = 0;
	}
	if (chan->ch_bufring != NULL) {
		hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
		chan->ch_bufring = NULL;
	}
	atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
	return error;
}
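
/*
 * A minimal caller-side sketch, not taken from the sources above: the softc
 * layout, callback name, and bufring sizes are hypothetical, and only the
 * vmbus_chan_open() signature shown in the example is relied on.  Both
 * bufring sizes must be multiples of PAGE_SIZE, per the KASSERTs.
 */
static void
mydrv_chan_callback(struct vmbus_channel *chan, void *xsc)
{
	struct mydrv_softc *sc = xsc;

	/* Normally: drain the RX bufring of "chan" and hand data to "sc". */
	(void)chan;
	(void)sc;
}

static int
mydrv_chan_attach(struct mydrv_softc *sc, struct vmbus_channel *chan)
{
	int error;

	/* 16-page TX and RX bufrings; no user data in the CHOPEN request. */
	error = vmbus_chan_open(chan, 16 * PAGE_SIZE, 16 * PAGE_SIZE,
	    NULL, 0, mydrv_chan_callback, sc);
	if (error != 0)
		device_printf(sc->mydrv_dev, "channel open failed: %d\n",
		    error);
	return (error);
}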
Example #2
static int 
hn_nvs_conn_chim(struct hn_softc *sc)
{
	struct vmbus_xact *xact = NULL;
	struct hn_nvs_chim_conn *chim;
	const struct hn_nvs_chim_connresp *resp;
	size_t resp_len;
	uint32_t status, sectsz;
	int error;

	/*
	 * Connect chimney sending buffer GPADL to the primary channel.
	 *
	 * NOTE:
	 * Only primary channel has chimney sending buffer connected to it.
	 * Sub-channels just share this chimney sending buffer.
	 */
	error = vmbus_chan_gpadl_connect(sc->hn_prichan,
	    sc->hn_chim_dma.hv_paddr, HN_CHIM_SIZE, &sc->hn_chim_gpadl);
	if (error) {
		if_printf(sc->hn_ifp, "chim gpadl conn failed: %d\n", error);
		goto cleanup;
	}

	/*
	 * Connect chimney sending buffer to NVS
	 */

	xact = vmbus_xact_get(sc->hn_xact, sizeof(*chim));
	if (xact == NULL) {
		if_printf(sc->hn_ifp, "no xact for nvs chim conn\n");
		error = ENXIO;
		goto cleanup;
	}
	chim = vmbus_xact_req_data(xact);
	chim->nvs_type = HN_NVS_TYPE_CHIM_CONN;
	chim->nvs_gpadl = sc->hn_chim_gpadl;
	chim->nvs_sig = HN_NVS_CHIM_SIG;

	resp_len = sizeof(*resp);
	resp = hn_nvs_xact_execute(sc, xact, chim, sizeof(*chim), &resp_len,
	    HN_NVS_TYPE_CHIM_CONNRESP);
	if (resp == NULL) {
		if_printf(sc->hn_ifp, "exec nvs chim conn failed\n");
		error = EIO;
		goto cleanup;
	}

	status = resp->nvs_status;
	sectsz = resp->nvs_sectsz;
	vmbus_xact_put(xact);
	xact = NULL;

	if (status != HN_NVS_STATUS_OK) {
		if_printf(sc->hn_ifp, "nvs chim conn failed: %x\n", status);
		error = EIO;
		goto cleanup;
	}
	if (sectsz == 0) {
		if_printf(sc->hn_ifp, "zero chimney sending buffer "
		    "section size\n");
		return (0);
	}

	sc->hn_chim_szmax = sectsz;
	sc->hn_chim_cnt = HN_CHIM_SIZE / sc->hn_chim_szmax;
	if (HN_CHIM_SIZE % sc->hn_chim_szmax != 0) {
		if_printf(sc->hn_ifp, "chimney sending sections are "
		    "not properly aligned\n");
	}
	if (sc->hn_chim_cnt % LONG_BIT != 0) {
		if_printf(sc->hn_ifp, "discard %d chimney sending sections\n",
		    sc->hn_chim_cnt % LONG_BIT);
	}

	sc->hn_chim_bmap_cnt = sc->hn_chim_cnt / LONG_BIT;
	sc->hn_chim_bmap = malloc(sc->hn_chim_bmap_cnt * sizeof(u_long),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/* Done! */
	sc->hn_flags |= HN_FLAG_CHIM_CONNECTED;
	if (bootverbose) {
		if_printf(sc->hn_ifp, "chimney sending buffer %d/%d\n",
		    sc->hn_chim_szmax, sc->hn_chim_cnt);
	}
	return (0);

cleanup:
	if (xact != NULL)
		vmbus_xact_put(xact);
	hn_nvs_disconn_chim(sc);
	return (error);
}
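
/*
 * Illustrative follow-up sketch (assumed helper, not the driver's actual
 * allocator): once hn_nvs_conn_chim() has sized hn_chim_bmap, a sender can
 * claim a free chimney section by finding and atomically setting a clear
 * bit.  The byte offset of section "idx" inside the chimney buffer is
 * idx * hn_chim_szmax.  Returns the section index, or -1 when the buffer
 * is exhausted.
 */
static int
hn_chim_section_alloc(struct hn_softc *sc)
{
	u_long bmap;
	int i, bit;

	for (i = 0; i < sc->hn_chim_bmap_cnt; ++i) {
		bmap = sc->hn_chim_bmap[i];
		while (bmap != ~0UL) {
			/* Lowest clear bit in this word. */
			bit = ffsl(~bmap) - 1;
			if (atomic_testandset_long(&sc->hn_chim_bmap[i],
			    bit) == 0)
				return (i * LONG_BIT + bit);
			/* Lost the race; reload the word and retry. */
			bmap = sc->hn_chim_bmap[i];
		}
	}
	return (-1);
}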
Example #3
/*
 * Net VSC initializes the send buffer with the net VSP.
 */
static int 
hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
{
	netvsc_dev *net_dev;
	nvsp_msg *init_pkt;
	int ret = 0;

	net_dev = hv_nv_get_outbound_net_device(sc);
	if (!net_dev) {
		return (ENODEV);
	}

	net_dev->send_buf = hyperv_dmamem_alloc(bus_get_dma_tag(sc->hn_dev),
	    PAGE_SIZE, 0, net_dev->send_buf_size, &net_dev->txbuf_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (net_dev->send_buf == NULL) {
		device_printf(sc->hn_dev, "allocate chimney txbuf failed\n");
		return ENOMEM;
	}

	/*
	 * Connect chimney sending buffer GPADL to the primary channel.
	 *
	 * NOTE:
	 * Only primary channel has chimney sending buffer connected to it.
	 * Sub-channels just share this chimney sending buffer.
	 */
	ret = vmbus_chan_gpadl_connect(sc->hn_prichan,
	    net_dev->txbuf_dma.hv_paddr, net_dev->send_buf_size,
	    &net_dev->send_buf_gpadl_handle);
	if (ret != 0) {
		device_printf(sc->hn_dev, "chimney sending buffer gpadl "
		    "connect failed: %d\n", ret);
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */

	init_pkt = &net_dev->channel_init_packet;

	memset(init_pkt, 0, sizeof(nvsp_msg));

	init_pkt->hdr.msg_type = nvsp_msg_1_type_send_send_buf;
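	/*
	 * NOTE: this older code reuses the send_rx_buf union member for the
	 * send-buffer request; both request variants carry the same
	 * gpadl_handle/id pair.
	 */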
	init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
	    net_dev->send_buf_gpadl_handle;
	init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
	    NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */

	ret = vmbus_chan_send(sc->hn_prichan,
	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
	    init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
	if (ret != 0) {
		goto cleanup;
	}

	sema_wait(&net_dev->channel_init_sema);

	/* Check the response */
	if (init_pkt->msgs.vers_1_msgs.send_send_buf_complete.status
	    != nvsp_status_success) {
		ret = EINVAL;
		goto cleanup;
	}

	net_dev->send_section_size =
	    init_pkt->msgs.vers_1_msgs.send_send_buf_complete.section_size;
	net_dev->send_section_count =
	    net_dev->send_buf_size / net_dev->send_section_size;
	net_dev->bitsmap_words = howmany(net_dev->send_section_count,
	    BITS_PER_LONG);
	net_dev->send_section_bitsmap =
	    malloc(net_dev->bitsmap_words * sizeof(long), M_NETVSC,
	    M_WAITOK | M_ZERO);

	goto exit;

cleanup:
	hv_nv_destroy_send_buffer(net_dev);
	
exit:
	return (ret);
}
Example #4
static int 
hn_nvs_conn_rxbuf(struct hn_softc *sc)
{
	struct vmbus_xact *xact = NULL;
	struct hn_nvs_rxbuf_conn *conn;
	const struct hn_nvs_rxbuf_connresp *resp;
	size_t resp_len;
	uint32_t status;
	int error, rxbuf_size;

	/*
	 * Limit RXBUF size for old NVS.
	 */
	if (sc->hn_nvs_ver <= HN_NVS_VERSION_2)
		rxbuf_size = HN_RXBUF_SIZE_COMPAT;
	else
		rxbuf_size = HN_RXBUF_SIZE;

	/*
	 * Connect the RXBUF GPADL to the primary channel.
	 *
	 * NOTE:
	 * Only primary channel has RXBUF connected to it.  Sub-channels
	 * just share this RXBUF.
	 */
	error = vmbus_chan_gpadl_connect(sc->hn_prichan,
	    sc->hn_rxbuf_dma.hv_paddr, rxbuf_size, &sc->hn_rxbuf_gpadl);
	if (error) {
		if_printf(sc->hn_ifp, "rxbuf gpadl conn failed: %d\n",
		    error);
		goto cleanup;
	}

	/*
	 * Connect RXBUF to NVS.
	 */

	xact = vmbus_xact_get(sc->hn_xact, sizeof(*conn));
	if (xact == NULL) {
		if_printf(sc->hn_ifp, "no xact for nvs rxbuf conn\n");
		error = ENXIO;
		goto cleanup;
	}
	conn = vmbus_xact_req_data(xact);
	conn->nvs_type = HN_NVS_TYPE_RXBUF_CONN;
	conn->nvs_gpadl = sc->hn_rxbuf_gpadl;
	conn->nvs_sig = HN_NVS_RXBUF_SIG;

	resp_len = sizeof(*resp);
	resp = hn_nvs_xact_execute(sc, xact, conn, sizeof(*conn), &resp_len,
	    HN_NVS_TYPE_RXBUF_CONNRESP);
	if (resp == NULL) {
		if_printf(sc->hn_ifp, "exec nvs rxbuf conn failed\n");
		error = EIO;
		goto cleanup;
	}

	status = resp->nvs_status;
	vmbus_xact_put(xact);
	xact = NULL;

	if (status != HN_NVS_STATUS_OK) {
		if_printf(sc->hn_ifp, "nvs rxbuf conn failed: %x\n", status);
		error = EIO;
		goto cleanup;
	}
	sc->hn_flags |= HN_FLAG_RXBUF_CONNECTED;

	return (0);

cleanup:
	if (xact != NULL)
		vmbus_xact_put(xact);
	hn_nvs_disconn_rxbuf(sc);
	return (error);
}
Example #5
/*
 * Net VSC initializes the receive buffer with the net VSP.
 *
 * Net VSP: the network virtualization service provider, i.e. the host-side
 *     peer, also known as the Hyper-V extensible switch and the synthetic
 *     data path.
 */
static int 
hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *sc)
{
	netvsc_dev *net_dev;
	nvsp_msg *init_pkt;
	int ret = 0;

	net_dev = hv_nv_get_outbound_net_device(sc);
	if (!net_dev) {
		return (ENODEV);
	}

	net_dev->rx_buf = hyperv_dmamem_alloc(bus_get_dma_tag(sc->hn_dev),
	    PAGE_SIZE, 0, net_dev->rx_buf_size, &net_dev->rxbuf_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (net_dev->rx_buf == NULL) {
		device_printf(sc->hn_dev, "allocate rxbuf failed\n");
		return ENOMEM;
	}

	/*
	 * Connect the RXBUF GPADL to the primary channel.
	 *
	 * NOTE:
	 * Only primary channel has RXBUF connected to it.  Sub-channels
	 * just share this RXBUF.
	 */
	ret = vmbus_chan_gpadl_connect(sc->hn_prichan,
	    net_dev->rxbuf_dma.hv_paddr, net_dev->rx_buf_size,
	    &net_dev->rx_buf_gpadl_handle);
	if (ret != 0) {
		device_printf(sc->hn_dev, "rxbuf gpadl connect failed: %d\n",
		    ret);
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_pkt = &net_dev->channel_init_packet;

	memset(init_pkt, 0, sizeof(nvsp_msg));

	init_pkt->hdr.msg_type = nvsp_msg_1_type_send_rx_buf;
	init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
	    net_dev->rx_buf_gpadl_handle;
	init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
	    NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */

	ret = vmbus_chan_send(sc->hn_prichan,
	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
	    init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
	if (ret != 0) {
		goto cleanup;
	}

	sema_wait(&net_dev->channel_init_sema);

	/* Check the response */
	if (init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.status
	    != nvsp_status_success) {
		ret = EINVAL;
		goto cleanup;
	}

	net_dev->rx_section_count =
	    init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.num_sections;

	net_dev->rx_sections = malloc(net_dev->rx_section_count *
	    sizeof(nvsp_1_rx_buf_section), M_NETVSC, M_WAITOK);
	memcpy(net_dev->rx_sections, 
	    init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.sections,
	    net_dev->rx_section_count * sizeof(nvsp_1_rx_buf_section));


	/*
	 * For first release, there should only be 1 section that represents
	 * the entire receive buffer
	 */
	if (net_dev->rx_section_count != 1
	    || net_dev->rx_sections->offset != 0) {
		ret = EINVAL;
		goto cleanup;
	}

	goto exit;

cleanup:
	hv_nv_destroy_rx_buffer(net_dev);
	
exit:
	return (ret);
}