Example 1
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
		      struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode,
		      int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	int err;

	/* Prefer a NUMA-local allocation; fall back to any node on failure. */
	cq = kzalloc_node(sizeof(struct mlx4_en_cq), GFP_KERNEL, node);
	if (!cq) {
		cq = kzalloc(sizeof(struct mlx4_en_cq), GFP_KERNEL);
		if (!cq) {
			en_err(priv, "Failed to allocate CQ structure\n");
			return -ENOMEM;
		}
	}

	cq->size = entries;
	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;

	/* Completions are dispatched from a dedicated fast-taskqueue thread. */
	cq->tq = taskqueue_create_fast("mlx4_en_que", M_NOWAIT,
			taskqueue_thread_enqueue, &cq->tq);
	if (mode == RX) {
		TASK_INIT(&cq->cq_task, 0, mlx4_en_rx_que, cq);
		taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s rx cq",
				if_name(priv->dev));
	} else {
		TASK_INIT(&cq->cq_task, 0, mlx4_en_tx_que, cq);
		taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s tx cq",
				if_name(priv->dev));
	}

	cq->ring = ring;
	cq->is_tx = mode;
	spin_lock_init(&cq->lock);

	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
				cq->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_cq;

	err = mlx4_en_map_buffer(&cq->wqres.buf);
	if (err)
		goto err_res;

	cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
	*pcq = cq;

	return 0;

err_res:
	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
err_cq:
	kfree(cq);
	return err;
}
Example 2
int
dmar_init_qi(struct dmar_unit *unit)
{
	uint64_t iqa;
	uint32_t ics;
	int qi_sz;

	if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
		return (0);
	unit->qi_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
	if (!unit->qi_enabled)
		return (0);

	TAILQ_INIT(&unit->tlb_flush_entries);
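	/* Deferred invalidation-queue work runs in a single fast-taskqueue thread. */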
	TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit);
	unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->qi_taskqueue);
	taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
	    "dmar%d qi taskq", unit->unit);

	unit->inv_waitd_gen = 0;
	unit->inv_waitd_seq = 1;

	qi_sz = DMAR_IQA_QS_DEF;
	TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
	if (qi_sz > DMAR_IQA_QS_MAX)
		qi_sz = DMAR_IQA_QS_MAX;
	unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
	/* Reserve one descriptor to prevent wraparound. */
	unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;

	/* Reads of the invalidation queue by the DMAR hardware are always coherent. */
	unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size,
	    M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	unit->inv_waitd_seq_hw_phys = pmap_kextract(
	    (vm_offset_t)&unit->inv_waitd_seq_hw);

	DMAR_LOCK(unit);
	dmar_write8(unit, DMAR_IQT_REG, 0);
	iqa = pmap_kextract(unit->inv_queue);
	iqa |= qi_sz;
	dmar_write8(unit, DMAR_IQA_REG, iqa);
	dmar_enable_qi(unit);
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);
	}
	dmar_enable_qi_intr(unit);
	DMAR_UNLOCK(unit);

	return (0);
}
Example 3
/*
 * Initialize ACPI task queue.
 */
static void
acpi_taskq_init(void *arg)
{
    int i;

    acpi_taskq = taskqueue_create_fast("acpi_task", M_NOWAIT,
	&taskqueue_thread_enqueue, &acpi_taskq);
    taskqueue_start_threads(&acpi_taskq, acpi_max_threads, PWAIT, "acpi_task");
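    /* Enqueue any tasks that were submitted before the taskqueue existed. */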
    if (acpi_task_count > 0) {
	if (bootverbose)
	    printf("AcpiOsExecute: enqueue %d pending tasks\n",
		acpi_task_count);
	for (i = 0; i < acpi_max_tasks; i++)
	    if (atomic_cmpset_int(&acpi_tasks[i].at_flag, ACPI_TASK_USED,
		ACPI_TASK_USED | ACPI_TASK_ENQUEUED))
		taskqueue_enqueue(acpi_taskq, &acpi_tasks[i].at_task);
    }
    acpi_taskq_started = 1;
}
Example 4
int
smc_attach(device_t dev)
{
	int			type, error;
	uint16_t		val;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct smc_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	error = 0;

	sc->smc_dev = dev;

	ifp = sc->smc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		error = ENOSPC;
		goto done;
	}

	mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	/* Set up watchdog callout. */
	callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0);

	type = SYS_RES_IOPORT;
	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	sc->smc_reg_rid = 0;
	sc->smc_reg = bus_alloc_resource(dev, type, &sc->smc_reg_rid, 0, ~0,
	    16, RF_ACTIVE);
	if (sc->smc_reg == NULL) {
		error = ENXIO;
		goto done;
	}

	sc->smc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 0,
	    ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->smc_irq == NULL) {
		error = ENXIO;
		goto done;
	}

	SMC_LOCK(sc);
	smc_reset(sc);
	SMC_UNLOCK(sc);

	smc_select_bank(sc, 3);
	val = smc_read_2(sc, REV);
	sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	sc->smc_rev = (val & REV_REV_MASK) >> REV_REV_SHIFT;
	if (bootverbose)
		device_printf(dev, "revision %x\n", sc->smc_rev);

	callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx,
	    CALLOUT_RETURNUNLOCKED);
	if (sc->smc_chip >= REV_CHIP_91110FD) {
		(void)mii_attach(dev, &sc->smc_miibus, ifp,
		    smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (sc->smc_miibus != NULL) {
			sc->smc_mii_tick = smc_mii_tick;
			sc->smc_mii_mediachg = smc_mii_mediachg;
			sc->smc_mii_mediaioctl = smc_mii_mediaioctl;
		}
	}

	smc_select_bank(sc, 1);
	eaddr[0] = smc_read_1(sc, IAR0);
	eaddr[1] = smc_read_1(sc, IAR1);
	eaddr[2] = smc_read_1(sc, IAR2);
	eaddr[3] = smc_read_1(sc, IAR3);
	eaddr[4] = smc_read_1(sc, IAR4);
	eaddr[5] = smc_read_1(sc, IAR5);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = smc_init;
	ifp->if_ioctl = smc_ioctl;
	ifp->if_start = smc_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ether_ifattach(ifp, eaddr);

	/* Set up the deferred-work taskqueue. */
	TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
	TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
	TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
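	/* A single fast-taskqueue thread services the interrupt, RX, and TX tasks above. */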
	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->smc_tq);
	taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->smc_dev));

	/* Mask all interrupts. */
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);

	/* Wire up interrupt */
	error = bus_setup_intr(dev, sc->smc_irq,
	    INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih);
	if (error != 0)
		goto done;

done:
	if (error != 0)
		smc_detach(dev);
	return (error);
}
Esempio n. 5
0
static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;

	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));

	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	virtio_set_feature_desc(dev, vtblk_feature_desc);
	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;

	/* Get local copy of config. */
	virtio_read_device_config(dev, 0, &blkcfg,
	    sizeof(struct virtio_blk_config));

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	TASK_INIT(&sc->vtblk_intr_task, 0, vtblk_intr_task, sc);
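	/* Interrupt handling is deferred to this fast taskqueue's single thread. */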
	sc->vtblk_tq = taskqueue_create_fast("vtblk_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vtblk_tq);
	if (sc->vtblk_tq == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate taskqueue\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	taskqueue_start_threads(&sc->vtblk_tq, 1, PI_DISK, "%s taskq",
	    device_get_nameunit(dev));

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}