Example #1
static void
ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)
{
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qp;
	vm_paddr_t mw_base;
	uint64_t mw_size, qp_offset;
	size_t tx_size;
	unsigned num_qps_mw, mw_num, mw_count;

	mw_count = nt->mw_count;
	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ntb = nt->ntb;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (nt->qp_count % mw_count && mw_num + 1 < nt->qp_count / mw_count)
		num_qps_mw = nt->qp_count / mw_count + 1;
	else
		num_qps_mw = nt->qp_count / mw_count;

	mw_base = mw->phys_addr;
	mw_size = mw->phys_size;

	tx_size = mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = mw->vbase + qp_offset;
	KASSERT(qp->tx_mw != NULL, ("uh oh?"));

	/* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */
	qp->tx_mw_phys = mw_base + qp_offset;
	KASSERT(qp->tx_mw_phys != 0, ("uh oh?"));

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = (void *)(qp->tx_mw + tx_size);

	/* Due to house-keeping, there must be at least 2 buffers */
	qp->tx_max_frame = qmin(tx_size / 2,
	    transport_mtu + sizeof(struct ntb_payload_header));
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	callout_init(&qp->link_work, 0);
	callout_init(&qp->queue_full, 1);
	callout_init(&qp->rx_full, 1);

	mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN);
	mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN);
	TASK_INIT(&qp->rx_completion_task, 0, ntb_complete_rxc, qp);
	TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp);

	STAILQ_INIT(&qp->rx_post_q);
	STAILQ_INIT(&qp->rx_pend_q);
	STAILQ_INIT(&qp->tx_free_q);

	callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
}
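
The slice arithmetic above splits each shared memory window evenly among the queue pairs mapped to it, with the earlier windows carrying one extra queue pair when the division is uneven. A minimal sketch of the same calculation; example_qp_slice is a hypothetical helper name used only for illustration, not part of the driver:

static void
example_qp_slice(uint64_t mw_size, unsigned qp_count, unsigned mw_count,
    unsigned qp_num, unsigned mw_num)
{
	unsigned num_qps_mw;
	uint64_t tx_size, qp_offset;

	/* Same split as ntb_transport_init_queue(). */
	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	tx_size = mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	printf("qp %u: offset %ju, slice %ju bytes\n", qp_num,
	    (uintmax_t)qp_offset, (uintmax_t)tx_size);
}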
Example #2
static int
ntb_transport_probe(struct ntb_softc *ntb)
{
	struct ntb_transport_ctx *nt = &net_softc;
	struct ntb_transport_mw *mw;
	uint64_t qp_bitmap;
	int rc;
	unsigned i;

	nt->mw_count = ntb_mw_count(ntb);
	for (i = 0; i < nt->mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ntb, i, &mw->phys_addr, &mw->vbase,
		    &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size,
		    &mw->addr_limit);
		if (rc != 0)
			goto err;

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ntb);
	nt->qp_count = flsll(qp_bitmap);
	KASSERT(nt->qp_count != 0, ("bogus db bitmap"));
	nt->qp_count -= 1;

	if (max_num_clients != 0 && max_num_clients < nt->qp_count)
		nt->qp_count = max_num_clients;
	else if (nt->mw_count < nt->qp_count)
		nt->qp_count = nt->mw_count;
	KASSERT(nt->qp_count <= QP_SETSIZE, ("invalid qp_count"));

	mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF);
	mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF);

	nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_IF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < nt->qp_count; i++) {
		set_bit(i, &nt->qp_bitmap);
		set_bit(i, &nt->qp_bitmap_free);
		ntb_transport_init_queue(nt, i);
	}

	callout_init(&nt->link_work, 0);
	callout_init(&nt->link_watchdog, 0);
	TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt);

	rc = ntb_set_ctx(ntb, nt, &ntb_transport_ops);
	if (rc != 0)
		goto err;

	nt->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
	if (enable_xeon_watchdog != 0)
		callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);
	return (0);

err:
	free(nt->qp_vec, M_NTB_IF);
	nt->qp_vec = NULL;
	return (rc);
}
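
The queue-pair count is derived from the hardware doorbell mask: flsll() returns the index of the highest set bit, and the driver then subtracts one, so a mask of 0x3f allows at most 5 queue pairs. A minimal, illustrative sketch of that derivation:

static unsigned
example_qp_count(uint64_t db_valid_mask)
{
	unsigned count;

	count = flsll(db_valid_mask);	/* index of the highest set bit */
	KASSERT(count != 0, ("empty doorbell mask"));
	return (count - 1);		/* one doorbell is held back */
}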
Example #3
static int
sfxge_create(struct sfxge_softc *sc)
{
	device_t dev;
	efx_nic_t *enp;
	int error;

	dev = sc->dev;

	sx_init(&sc->softc_lock, "sfxge_softc");

	sc->stats_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "stats", CTLFLAG_RD, NULL, "Statistics");
	if (!sc->stats_node) {
		error = ENOMEM;
		goto fail;
	}

	TASK_INIT(&sc->task_reset, 0, sfxge_reset, sc);

	(void) pci_enable_busmaster(dev);

	/* Initialize DMA mappings. */
	if ((error = sfxge_dma_init(sc)) != 0)
		goto fail;

	/* Map the device registers. */
	if ((error = sfxge_bar_init(sc)) != 0)
		goto fail;

	error = efx_family(pci_get_vendor(dev), pci_get_device(dev),
	    &sc->family);
	KASSERT(error == 0, ("Family should be filtered by sfxge_probe()"));

	/* Create the common code nic object. */
	mtx_init(&sc->enp_lock, "sfxge_nic", NULL, MTX_DEF);
	if ((error = efx_nic_create(sc->family, (efsys_identifier_t *)sc,
	    &sc->bar, &sc->enp_lock, &enp)) != 0)
		goto fail3;
	sc->enp = enp;

	/* Initialize MCDI to talk to the microcontroller. */
	if ((error = sfxge_mcdi_init(sc)) != 0)
		goto fail4;

	/* Probe the NIC and build the configuration data area. */
	if ((error = efx_nic_probe(enp)) != 0)
		goto fail5;

	/* Initialize the NVRAM. */
	if ((error = efx_nvram_init(enp)) != 0)
		goto fail6;

	/* Initialize the VPD. */
	if ((error = efx_vpd_init(enp)) != 0)
		goto fail7;

	/* Reset the NIC. */
	if ((error = efx_nic_reset(enp)) != 0)
		goto fail8;

	/* Initialize buffer table allocation. */
	sc->buffer_table_next = 0;

	/* Set up interrupts. */
	if ((error = sfxge_intr_init(sc)) != 0)
		goto fail8;

	/* Initialize event processing state. */
	if ((error = sfxge_ev_init(sc)) != 0)
		goto fail11;

	/* Initialize receive state. */
	if ((error = sfxge_rx_init(sc)) != 0)
		goto fail12;

	/* Initialize transmit state. */
	if ((error = sfxge_tx_init(sc)) != 0)
		goto fail13;

	/* Initialize port state. */
	if ((error = sfxge_port_init(sc)) != 0)
		goto fail14;

	sc->init_state = SFXGE_INITIALIZED;

	return (0);

fail14:
	sfxge_tx_fini(sc);

fail13:
	sfxge_rx_fini(sc);

fail12:
	sfxge_ev_fini(sc);

fail11:
	sfxge_intr_fini(sc);

fail8:
	efx_vpd_fini(enp);

fail7:
	efx_nvram_fini(enp);

fail6:
	efx_nic_unprobe(enp);

fail5:
	sfxge_mcdi_fini(sc);

fail4:
	sc->enp = NULL;
	efx_nic_destroy(enp);
	mtx_destroy(&sc->enp_lock);

fail3:
	sfxge_bar_fini(sc);
	(void) pci_disable_busmaster(sc->dev);

fail:
	sc->dev = NULL;
	sx_destroy(&sc->softc_lock);
	return (error);
}
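
sfxge_create() relies on the classic goto-unwind idiom: resources are acquired in order, and each failure label undoes only the steps that already completed, in reverse order. Consecutive steps that need identical cleanup can share a label, which is why both efx_nic_reset() and sfxge_intr_init() jump to fail8. A self-contained sketch of the idiom with hypothetical step_a/step_b helpers:

static int step_a_init(void) { return (0); }
static void step_a_fini(void) { }
static int step_b_init(void) { return (ENOMEM); }

static int
example_create(void)
{
	int error;

	if ((error = step_a_init()) != 0)
		goto fail;
	if ((error = step_b_init()) != 0)
		goto fail2;
	return (0);

fail2:
	/* step_b never completed, so only step_a is undone. */
	step_a_fini();
fail:
	return (error);
}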
Example #4
static void *
nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
{
	uint8_t			descr[NVME_MODEL_NUMBER_LENGTH+1];
	struct nvd_disk		*ndisk;
	struct disk		*disk;
	struct nvd_controller	*ctrlr = ctrlr_arg;

	ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);

	disk = disk_alloc();
	disk->d_strategy = nvd_strategy;
	disk->d_ioctl = nvd_ioctl;
	disk->d_name = NVD_STR;
	disk->d_drv1 = ndisk;

	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);
	disk->d_delmaxsize = (off_t)nvme_ns_get_size(ns);
	disk->d_stripesize = nvme_ns_get_optimal_sector_size(ns);

	if (TAILQ_EMPTY(&disk_head))
		disk->d_unit = 0;
	else
		disk->d_unit =
		    TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;

	disk->d_flags = 0;

	if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANDELETE;

	if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
	disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif

	/*
	 * d_ident and d_descr are both far bigger than the length of either
	 *  the serial or model number strings.
	 */
	nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
	    sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);

	nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
	    NVME_MODEL_NUMBER_LENGTH);

#if __FreeBSD_version >= 900034
	strlcpy(disk->d_descr, descr, sizeof(descr));
#endif

	ndisk->ns = ns;
	ndisk->disk = disk;
	ndisk->cur_depth = 0;

	mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
	bioq_init(&ndisk->bioq);

	TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
	ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ndisk->tq);
	taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

	TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
	TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);

	disk_create(disk, DISK_VERSION);

	printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
	printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
		(uintmax_t)disk->d_mediasize / (1024*1024),
		(uintmax_t)disk->d_mediasize / disk->d_sectorsize,
		disk->d_sectorsize);

	return (NULL);
}
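
The TASK_INIT/taskqueue_create/taskqueue_start_threads trio above sets up a single-threaded per-disk taskqueue. A strategy routine built on top of it then only queues the bio and kicks the task. A minimal sketch of that hand-off; the body below is illustrative and not copied from the driver's actual nvd_strategy:

static void
example_strategy(struct bio *bp)
{
	struct nvd_disk *ndisk = bp->bio_disk->d_drv1;

	mtx_lock(&ndisk->bioqlock);
	bioq_insert_tail(&ndisk->bioq, bp);
	mtx_unlock(&ndisk->bioqlock);

	/* Defer the actual I/O submission to the taskqueue thread. */
	taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
}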
Example #5
/**
 * Module/driver initialization. Creates the network devices.
 *
 * @return Zero on success
 */
int cvm_oct_init_module(device_t bus)
{
	device_t dev;
	int ifnum;
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	printf("cavium-ethernet: %s\n", OCTEON_SDK_VERSION_STRING);

	/*
	 * MAC addresses for this driver start after the management
	 * ports.
	 *
	 * XXX Would be nice if __cvmx_mgmt_port_num_ports() were
	 *     not static to cvmx-mgmt-port.c.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN56XX))
		cvm_oct_mac_addr_offset = 1;
	else if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
		cvm_oct_mac_addr_offset = 2;
	else
		cvm_oct_mac_addr_offset = 0;

	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw(bus);

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_prt_tagx_t pip_prt_tagx;
			int pkind = cvmx_helper_get_ipd_port(interface, port);

			pip_prt_tagx.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pkind));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(pkind), pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	cvm_oct_link_taskq = taskqueue_create("octe link", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_link_taskq);
	taskqueue_start_threads(&cvm_oct_link_taskq, 1, PI_NET,
	    "octe link taskq");

	/* Initialize the FAU used for counting packet buffers that need to be freed */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	ifnum = 0;
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0); port < cvmx_helper_get_ipd_port(interface, num_ports); port++) {
			cvm_oct_private_t *priv;
			struct ifnet *ifp;
			
			dev = BUS_ADD_CHILD(bus, 0, "octe", ifnum++);
			if (dev != NULL)
				ifp = if_alloc(IFT_ETHER);
			if (dev == NULL || ifp == NULL) {
				printf("\t\tFailed to allocate ethernet device for port %d\n", port);
				continue;
			}

			/* Initialize the device private structure. */
			device_probe(dev);
			priv = device_get_softc(dev);
			priv->dev = dev;
			priv->ifp = ifp;
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++)
				cvmx_fau_atomic_write32(priv->fau+qos*4, 0);
			TASK_INIT(&priv->link_task, 0, cvm_oct_update_link, priv);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon NPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				priv->init = cvm_oct_xaui_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon LOOP Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->init = cvm_oct_sgmii_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				priv->init = cvm_oct_spi_init;
				priv->uninit = cvm_oct_spi_uninit;
				device_set_desc(dev, "Cavium Octeon SPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon RGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon GMII Ethernet");
				break;
			}

			ifp->if_softc = priv;

			if (!priv->init) {
				panic("%s: unsupported device type, need to free ifp.", __func__);
			} else if (priv->init(ifp) < 0) {
				printf("\t\tFailed to register ethernet device for interface %d, port %d\n",
				interface, priv->port);
				panic("%s: init failed, need to free ifp.", __func__);
			} else {
				cvm_oct_device[priv->port] = ifp;
				fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/* Set the POW timer rate to give an interrupt at most INTERRUPT_LIMIT times per second */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, cvmx_clock_get_rate(CVMX_CLOCK_CORE)/(INTERRUPT_LIMIT*16*256)<<8);

		/* Enable POW timer interrupt. It will count when there are packets available */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1ful<<24);
	} else {
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	callout_init(&cvm_oct_poll_timer, CALLOUT_MPSAFE);
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);

	return 0;
}
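
The final callout_reset() arms cvm_do_timer once, one tick from now; a periodic poll built on callout(9) normally re-arms itself from inside the handler. A minimal sketch with a hypothetical handler (not the driver's actual cvm_do_timer):

static void
example_poll(void *arg)
{
	/* ... poll link state, drain queued work, etc. ... */

	/* Re-arm for the next tick to keep the poll periodic. */
	callout_reset(&cvm_oct_poll_timer, hz, example_poll, arg);
}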
Example #6
static int
ubt_attach(device_t dev)
{
	struct usb_attach_arg		*uaa = device_get_ivars(dev);
	struct ubt_softc		*sc = device_get_softc(dev);
	struct usb_endpoint_descriptor	*ed;
	struct usb_interface_descriptor *id;
	uint16_t			wMaxPacketSize;
	uint8_t				alt_index, i, j;
	uint8_t				iface_index[2] = { 0, 1 };

	device_set_usb_desc(dev);

	sc->sc_dev = dev;
	sc->sc_debug = NG_UBT_WARN_LEVEL;

	/* 
	 * Create Netgraph node
	 */

	if (ng_make_node_common(&typestruct, &sc->sc_node) != 0) {
		UBT_ALERT(sc, "could not create Netgraph node\n");
		return (ENXIO);
	}

	/* Name Netgraph node */
	if (ng_name_node(sc->sc_node, device_get_nameunit(dev)) != 0) {
		UBT_ALERT(sc, "could not name Netgraph node\n");
		NG_NODE_UNREF(sc->sc_node);
		return (ENXIO);
	}
	NG_NODE_SET_PRIVATE(sc->sc_node, sc);
	NG_NODE_FORCE_WRITER(sc->sc_node);

	/*
	 * Initialize device softc structure
	 */

	/* initialize locks */
	mtx_init(&sc->sc_ng_mtx, "ubt ng", NULL, MTX_DEF);
	mtx_init(&sc->sc_if_mtx, "ubt if", NULL, MTX_DEF | MTX_RECURSE);

	/* initialize packet queues */
	NG_BT_MBUFQ_INIT(&sc->sc_cmdq, UBT_DEFAULT_QLEN);
	NG_BT_MBUFQ_INIT(&sc->sc_aclq, UBT_DEFAULT_QLEN);
	NG_BT_MBUFQ_INIT(&sc->sc_scoq, UBT_DEFAULT_QLEN);

	/* initialize glue task */
	TASK_INIT(&sc->sc_task, 0, ubt_task, sc);

	/*
	 * Configure Bluetooth USB device. Discover all required USB
	 * interfaces and endpoints.
	 *
	 * USB device must present two interfaces:
	 * 1) Interface 0 that has 3 endpoints
	 *	1) Interrupt endpoint to receive HCI events
	 *	2) Bulk IN endpoint to receive ACL data
	 *	3) Bulk OUT endpoint to send ACL data
	 *
	 * 2) Interface 1 that has 2 endpoints
	 *	1) Isochronous IN endpoint to receive SCO data
	 *	2) Isochronous OUT endpoint to send SCO data
	 *
	 * Interface 1 (with isochronous endpoints) has several alternate
	 * configurations with different packet size.
	 */

	/*
	 * For interface #1 search alternate settings, and find
	 * the descriptor with the largest wMaxPacketSize
	 */

	wMaxPacketSize = 0;
	alt_index = 0;
	i = 0;
	j = 0;
	ed = NULL;

	/* 
	 * Search through all the descriptors looking for the largest
	 * packet size:
	 */
	while ((ed = (struct usb_endpoint_descriptor *)usb_desc_foreach(
	    usbd_get_config_descriptor(uaa->device), 
	    (struct usb_descriptor *)ed))) {

		if ((ed->bDescriptorType == UDESC_INTERFACE) &&
		    (ed->bLength >= sizeof(*id))) {
			id = (struct usb_interface_descriptor *)ed;
			i = id->bInterfaceNumber;
			j = id->bAlternateSetting;
		}

		if ((ed->bDescriptorType == UDESC_ENDPOINT) &&
		    (ed->bLength >= sizeof(*ed)) &&
		    (i == 1)) {
			uint16_t temp;

			temp = UGETW(ed->wMaxPacketSize);
			if (temp > wMaxPacketSize) {
				wMaxPacketSize = temp;
				alt_index = j;
			}
		}
	}

	/* Set alt configuration on interface #1 only if we found it */
	if (wMaxPacketSize > 0 &&
	    usbd_set_alt_interface_index(uaa->device, 1, alt_index)) {
		UBT_ALERT(sc, "could not set alternate setting %d " \
			"for interface 1!\n", alt_index);
		goto detach;
	}

	/* Setup transfers for both interfaces */
	if (usbd_transfer_setup(uaa->device, iface_index, sc->sc_xfer,
			ubt_config, UBT_N_TRANSFER, sc, &sc->sc_if_mtx)) {
		UBT_ALERT(sc, "could not allocate transfers\n");
		goto detach;
	}

	/* Claim all interfaces on the device */
	for (i = 1; usbd_get_iface(uaa->device, i) != NULL; i ++)
		usbd_set_parent_iface(uaa->device, i, uaa->info.bIfaceIndex);

	return (0); /* success */

detach:
	ubt_detach(dev);

	return (ENXIO);
} /* ubt_attach */
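
The glue task initialized with TASK_INIT() above still needs something to run it. A common pattern is to enqueue it on a taskqueue when work arrives, for instance the system software-interrupt queue. A minimal sketch of that usage; this is an assumption about a typical scheduling path, not necessarily how ng_ubt schedules its task:

static void
example_schedule(struct ubt_softc *sc)
{
	/* Defer sc_task to the shared swi taskqueue for execution. */
	taskqueue_enqueue(taskqueue_swi, &sc->sc_task);
}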