Example #1
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
		      struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode,
		      int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	int err;

	cq = kzalloc_node(sizeof(struct mlx4_en_cq), GFP_KERNEL, node);
	if (!cq) {
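		/* NUMA-local allocation failed; retry without node affinity. */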
		cq = kzalloc(sizeof(struct mlx4_en_cq), GFP_KERNEL);
		if (!cq) {
			en_err(priv, "Failed to allocate CQ structure\n");
			return -ENOMEM;
		}
	}

	cq->size = entries;
	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;

	cq->tq = taskqueue_create_fast("mlx4_en_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &cq->tq);
	if (mode == RX) {
		TASK_INIT(&cq->cq_task, 0, mlx4_en_rx_que, cq);
		taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s rx cq",
		    if_name(priv->dev));
	} else {
		TASK_INIT(&cq->cq_task, 0, mlx4_en_tx_que, cq);
		taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s tx cq",
		    if_name(priv->dev));
	}

	cq->ring = ring;
	cq->is_tx = mode;
	spin_lock_init(&cq->lock);

	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
				cq->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_cq;

	err = mlx4_en_map_buffer(&cq->wqres.buf);
	if (err)
		goto err_res;

	cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
	*pcq = cq;

	return 0;

err_res:
	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
err_cq:
	kfree(cq);
	return err;
}
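A queue created this way needs a matching teardown in the driver's destroy path; a minimal sketch of the cleanup for cq->tq, assuming no further enqueues can happen at that point:

	/* Wait for any in-flight cq_task run to finish, then free the queue. */
	taskqueue_drain(cq->tq, &cq->cq_task);
	taskqueue_free(cq->tq);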
Example #2
int main(int argc, char **argv)
{
	struct taskqueue *t;
	struct task task;
	int retval;

	t = taskqueue_create("test", M_WAITOK, taskqueue_thread_enqueue, &t);
	if (!t) {
		kprintf("unable to create taskqueue\n");
		return 1;
	}

	retval = taskqueue_start_threads(&t,
					 4, 	/*num threads*/
					 PWAIT,	/*priority*/
					 "%s",	/* thread name */
					 "test");
	if (retval != 0) {
		kprintf("failed to create taskqueue threads\n");
		return 1;
	}

	TASK_INIT(&task, /*priority*/0, task_worker, NULL);

	retval = taskqueue_enqueue(t, &task);
	if (retval != 0) {
		kprintf("failed to enqueue task\n");
		return 1;
	}

	taskqueue_drain(t, &task);

	taskqueue_free(t);
	return 0;
}
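Example #2 enqueues a task_worker callback that it never defines. Task callbacks use the task_fn_t signature from taskqueue(9): a context pointer (the one passed to TASK_INIT()) and a pending count. A minimal sketch, with an illustrative message:

static void
task_worker(void *context, int pending)
{
	/* pending = number of enqueues since this task last ran. */
	kprintf("task ran, pending count %d\n", pending);
}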
Example #3
/*
 * Initialise cache headers
 */
int
pefs_init(struct vfsconf *vfsp)
{
	PEFSDEBUG("pefs_init\n");

	LIST_INIT(&pefs_node_freelist);

	TASK_INIT(&pefs_task_freenode, 0, pefs_node_free_proc, NULL);
	pefs_taskq = taskqueue_create("pefs_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &pefs_taskq);
	taskqueue_start_threads(&pefs_taskq, 1, PVFS, "pefs taskq");

	pefs_node_zone = uma_zcreate("pefs_node", sizeof(struct pefs_node),
	    NULL, NULL, NULL, (uma_fini) bzero, UMA_ALIGN_PTR, 0);

	pefs_nodehash_tbl = hashinit(desiredvnodes / 8, M_PEFSHASH,
	    &pefs_nodehash_mask);
	pefs_nodes = 0;
	mtx_init(&pefs_node_listmtx, "pefs_node_list", NULL, MTX_DEF);

	pefs_dircache_init();
	pefs_crypto_init();

	return (0);
}
Example #4
int
dmar_init_qi(struct dmar_unit *unit)
{
	uint64_t iqa;
	uint32_t ics;
	int qi_sz;

	if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
		return (0);
	unit->qi_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
	if (!unit->qi_enabled)
		return (0);

	TAILQ_INIT(&unit->tlb_flush_entries);
	TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit);
	unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->qi_taskqueue);
	taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
	    "dmar%d qi taskq", unit->unit);

	unit->inv_waitd_gen = 0;
	unit->inv_waitd_seq = 1;

	qi_sz = DMAR_IQA_QS_DEF;
	TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
	if (qi_sz > DMAR_IQA_QS_MAX)
		qi_sz = DMAR_IQA_QS_MAX;
	unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
	/* Reserve one descriptor to prevent wraparound. */
	unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;

	/* The invalidation queue reads by DMARs are always coherent. */
	unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size,
	    M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	unit->inv_waitd_seq_hw_phys = pmap_kextract(
	    (vm_offset_t)&unit->inv_waitd_seq_hw);

	DMAR_LOCK(unit);
	dmar_write8(unit, DMAR_IQT_REG, 0);
	iqa = pmap_kextract(unit->inv_queue);
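	/*
	 * The IQA register encodes the queue size (as a power of two,
	 * in pages) in its low bits next to the page-aligned base.
	 */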
	iqa |= qi_sz;
	dmar_write8(unit, DMAR_IQA_REG, iqa);
	dmar_enable_qi(unit);
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);
	}
	dmar_enable_qi_intr(unit);
	DMAR_UNLOCK(unit);

	return (0);
}
Example #5
static int
cfi_disk_attach(device_t dev)
{
	struct cfi_disk_softc *sc = device_get_softc(dev);

	sc->parent = device_get_softc(device_get_parent(dev));
	/* validate interface width; assumed by other code */
	if (sc->parent->sc_width != 1 &&
	    sc->parent->sc_width != 2 &&
	    sc->parent->sc_width != 4)
		return EINVAL;

	sc->disk = disk_alloc();
	if (sc->disk == NULL)
		return ENOMEM;
	sc->disk->d_name = "cfid";
	sc->disk->d_unit = device_get_unit(dev);
	sc->disk->d_open = cfi_disk_open;
	sc->disk->d_close = cfi_disk_close;
	sc->disk->d_strategy = cfi_disk_strategy;
	sc->disk->d_ioctl = cfi_disk_ioctl;
	sc->disk->d_dump = NULL;		/* NB: no dumps */
	sc->disk->d_getattr = cfi_disk_getattr;
	sc->disk->d_sectorsize = CFI_DISK_SECSIZE;
	sc->disk->d_mediasize = sc->parent->sc_size;
	sc->disk->d_maxsize = CFI_DISK_MAXIOSIZE;
	/* NB: use stripesize to hold the erase/region size */
	if (sc->parent->sc_regions) {
		/*
		 * Multiple regions, use the last one.  This is a
		 * total hack as it's (presently) used only by
		 * geom_redboot to locate the FIS directory which
		 * lies at the start of the last erase region.
		 */
		sc->disk->d_stripesize =
		    sc->parent->sc_region[sc->parent->sc_regions-1].r_blksz;
	} else
		sc->disk->d_stripesize = sc->disk->d_mediasize;
	sc->disk->d_drv1 = sc;
	disk_create(sc->disk, DISK_VERSION);

	mtx_init(&sc->qlock, "CFID I/O lock", NULL, MTX_DEF);
	bioq_init(&sc->bioq);

	sc->tq = taskqueue_create("cfid_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PI_DISK, "cfid taskq");

	TASK_INIT(&sc->iotask, 0, cfi_io_proc, sc);

	return 0;
}
Example #6
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc __unused,
    int maxalloc __unused, uint_t flags)
{
	taskq_t *tq;

	if ((flags & TASKQ_THREADS_CPU_PCT) != 0)
		nthreads = MAX((mp_ncpus * nthreads) / 100, 1);

	tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
	tq->tq_queue = taskqueue_create(name, M_WAITOK, taskqueue_thread_enqueue,
	    &tq->tq_queue);
	(void) taskqueue_start_threads(&tq->tq_queue, nthreads, pri, "%s", name);

	return ((taskq_t *)tq);
}
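With TASKQ_THREADS_CPU_PCT set, nthreads above is read as a percentage of the online CPUs rather than an absolute count, clamped to at least one thread. A hypothetical call sizing a pool to half the CPUs (the name and PVM priority are illustrative):

	taskq_t *tq;

	/* 50 here means 50% of mp_ncpus, not 50 threads. */
	tq = taskq_create("half_cpus", 50, PVM, 0, 0, TASKQ_THREADS_CPU_PCT);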
Example #7
/*
 * Initialize ACPI task queue.
 */
static void
acpi_taskq_init(void *arg)
{
    int i;

    acpi_taskq = taskqueue_create_fast("acpi_task", M_NOWAIT,
	&taskqueue_thread_enqueue, &acpi_taskq);
    taskqueue_start_threads(&acpi_taskq, acpi_max_threads, PWAIT, "acpi_task");
    if (acpi_task_count > 0) {
	if (bootverbose)
	    printf("AcpiOsExecute: enqueue %d pending tasks\n",
		acpi_task_count);
	for (i = 0; i < acpi_max_tasks; i++)
	    if (atomic_cmpset_int(&acpi_tasks[i].at_flag, ACPI_TASK_USED,
		ACPI_TASK_USED | ACPI_TASK_ENQUEUED))
		taskqueue_enqueue(acpi_taskq, &acpi_tasks[i].at_task);
    }
    acpi_taskq_started = 1;
}
Example #8
void
altera_sdcard_attach(struct altera_sdcard_softc *sc)
{

	ALTERA_SDCARD_LOCK_INIT(sc);
	ALTERA_SDCARD_CONDVAR_INIT(sc);
	sc->as_disk = NULL;
	bioq_init(&sc->as_bioq);
	sc->as_currentbio = NULL;
	sc->as_state = ALTERA_SDCARD_STATE_NOCARD;
	sc->as_taskqueue = taskqueue_create("altera_sdcardc taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->as_taskqueue);
	taskqueue_start_threads(&sc->as_taskqueue, 1, PI_DISK,
	    "altera_sdcardc%d taskqueue", sc->as_unit);
	TIMEOUT_TASK_INIT(sc->as_taskqueue, &sc->as_task, 0,
	    altera_sdcard_task, sc);

	/*
	 * Kick off timer-driven processing with a manual poll so that we
	 * synchronously detect an already-inserted SD Card during the boot or
	 * other driver attach point.
	 */
	altera_sdcard_task(sc, 1);
}
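Example #8 kicks off its timeout task with a manual poll; later runs of a timeout_task are normally scheduled with taskqueue_enqueue_timeout(9). A sketch of re-arming the poll one second out, reusing the softc above:

	/* Run altera_sdcard_task() again hz ticks (one second) from now. */
	taskqueue_enqueue_timeout(sc->as_taskqueue, &sc->as_task, hz);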
Example #9
/**
 * mrsas_cam_attach:        Main entry to CAM subsystem 
 * input:                   Adapter instance soft state 
 *
 * This function is called from mrsas_attach() during initialization
 * to perform SIM allocations and XPT bus registration.  If the kernel 
 * version is 7.4 or earlier, it would also initiate a bus scan.
 */
int mrsas_cam_attach(struct mrsas_softc *sc)
{
    struct cam_devq *devq;
    int mrsas_cam_depth;

    mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;
    
    if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
        device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
        return(ENOMEM);
    }


    /* 
     * Create SIM for bus 0 and register, also create path 
     */
    sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
        device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
        mrsas_cam_depth, devq);
    if (sc->sim_0 == NULL){
        device_printf(sc->mrsas_dev, "Cannot register SIM\n");
        cam_simq_release(devq);
        return(ENXIO);
    }
    /* Initialize taskqueue for Event Handling */
    TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
    sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
        taskqueue_thread_enqueue, &sc->ev_tq);

    /* Run the task queue with lowest priority */
    taskqueue_start_threads(&sc->ev_tq, 1, 255, -1, "%s taskq",
        device_get_nameunit(sc->mrsas_dev));
    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    if (xpt_bus_register(sc->sim_0, 0) != CAM_SUCCESS)
    {
        cam_sim_free(sc->sim_0);
        cam_simq_release(devq);
        lockmgr(&sc->sim_lock, LK_RELEASE); 
        return(ENXIO);
    }
    if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
                         CAM_TARGET_WILDCARD,
                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_bus_deregister(cam_sim_path(sc->sim_0));
        cam_sim_free(sc->sim_0);
        cam_simq_release(devq);
        lockmgr(&sc->sim_lock, LK_RELEASE); 
        return(ENXIO);
    }
    lockmgr(&sc->sim_lock, LK_RELEASE);

    /* 
     * Create SIM for bus 1 and register, also create path 
     */
    sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
        device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
        mrsas_cam_depth, devq);
    cam_simq_release(devq);
    if (sc->sim_1 == NULL){
        device_printf(sc->mrsas_dev, "Cannot register SIM\n");
        return(ENXIO);
    }

    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    if (xpt_bus_register(sc->sim_1, 1) != CAM_SUCCESS) {
        cam_sim_free(sc->sim_1);
        lockmgr(&sc->sim_lock, LK_RELEASE);
        return(ENXIO);
    }
    if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
                         CAM_TARGET_WILDCARD,
                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_bus_deregister(cam_sim_path(sc->sim_1));
        cam_sim_free(sc->sim_1);
        lockmgr(&sc->sim_lock, LK_RELEASE);
        return(ENXIO);
    }
    lockmgr(&sc->sim_lock, LK_RELEASE);

#if (__FreeBSD_version <= 704000)
    if (mrsas_bus_scan(sc)){
        device_printf(sc->mrsas_dev, "Error in bus scan.\n");
        return(1);
    }
#endif
    return(0);
}
Example #10
static int
athp_pci_attach(device_t dev)
{
	struct ath10k_pci *ar_pci = device_get_softc(dev);
	struct ath10k *ar = &ar_pci->sc_sc;
	int rid, i;
	int err = 0;
	int ret;

	ar->sc_dev = dev;
	ar->sc_invalid = 1;

	/* XXX TODO: initialize sc_debug from TUNABLE */
#if 0
	ar->sc_debug = ATH10K_DBG_BOOT | ATH10K_DBG_PCI | ATH10K_DBG_HTC |
	    ATH10K_DBG_PCI_DUMP | ATH10K_DBG_WMI | ATH10K_DBG_BMI | ATH10K_DBG_MAC |
	    ATH10K_DBG_WMI_PRINT | ATH10K_DBG_MGMT | ATH10K_DBG_DATA | ATH10K_DBG_HTT;
#endif
	ar->sc_psc = ar_pci;

	/* Load-time tunable/sysctl tree */
	athp_attach_sysctl(ar);

	/* Enable WMI/HTT RX for now */
	ar->sc_rx_wmi = 1;
	ar->sc_rx_htt = 1;

	/* Fetch pcie capability offset */
	ret = pci_find_cap(dev, PCIY_EXPRESS, &ar_pci->sc_cap_off);
	if (ret != 0) {
		device_printf(dev,
		    "%s: failed to find pci-express capability offset\n",
		    __func__);
		return (ret);
	}

	/*
	 * Initialise ath10k core bits.
	 */
	if (ath10k_core_init(ar) < 0)
		goto bad0;

	/*
	 * Initialise ath10k freebsd bits.
	 */
	sprintf(ar->sc_mtx_buf, "%s:def", device_get_nameunit(dev));
	mtx_init(&ar->sc_mtx, ar->sc_mtx_buf, MTX_NETWORK_LOCK,
	    MTX_DEF);

	sprintf(ar->sc_buf_mtx_buf, "%s:buf", device_get_nameunit(dev));
	mtx_init(&ar->sc_buf_mtx, ar->sc_buf_mtx_buf, "athp buf", MTX_DEF);

	sprintf(ar->sc_dma_mtx_buf, "%s:dma", device_get_nameunit(dev));
	mtx_init(&ar->sc_dma_mtx, ar->sc_dma_mtx_buf, "athp dma", MTX_DEF);

	sprintf(ar->sc_conf_mtx_buf, "%s:conf", device_get_nameunit(dev));
	mtx_init(&ar->sc_conf_mtx, ar->sc_conf_mtx_buf, "athp conf",
	    MTX_DEF | MTX_RECURSE);

	sprintf(ar_pci->ps_mtx_buf, "%s:ps", device_get_nameunit(dev));
	mtx_init(&ar_pci->ps_mtx, ar_pci->ps_mtx_buf, "athp ps", MTX_DEF);

	sprintf(ar_pci->ce_mtx_buf, "%s:ce", device_get_nameunit(dev));
	mtx_init(&ar_pci->ce_mtx, ar_pci->ce_mtx_buf, "athp ce", MTX_DEF);

	sprintf(ar->sc_data_mtx_buf, "%s:data", device_get_nameunit(dev));
	mtx_init(&ar->sc_data_mtx, ar->sc_data_mtx_buf, "athp data",
	    MTX_DEF);

	/*
	 * Initialise ath10k BMI/PCIDIAG bits.
	 */
	ret = athp_descdma_alloc(ar, &ar_pci->sc_bmi_txbuf, "bmi_msg_req",
	    4, 1024);
	ret |= athp_descdma_alloc(ar, &ar_pci->sc_bmi_rxbuf, "bmi_msg_resp",
	    4, 1024);
	if (ret != 0) {
		device_printf(dev, "%s: failed to allocate BMI TX/RX buffer\n",
		    __func__);
		goto bad0;
	}

	/*
	 * Initialise HTT descriptors/memory.
	 */
	ret = ath10k_htt_rx_alloc_desc(ar, &ar->htt);
	if (ret != 0) {
		device_printf(dev, "%s: failed to alloc HTT RX descriptors\n",
		    __func__);
		goto bad;
	}

	/* XXX here instead of in core_init because we need the lock init'ed */
	callout_init_mtx(&ar->scan.timeout, &ar->sc_data_mtx, 0);

	ar_pci->pipe_taskq = taskqueue_create("athp pipe taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &ar_pci->pipe_taskq);
	if (ar_pci->pipe_taskq == NULL) {
		device_printf(dev, "%s: couldn't create pipe taskq\n",
		    __func__);
		err = ENXIO;
		goto bad;
	}
	(void) taskqueue_start_threads(&ar_pci->pipe_taskq, 1, PI_NET,
	    "%s pipe taskq", device_get_nameunit(dev));

	/*
	 * Look at the device/vendor ID and choose which register offset
	 * mapping to use.  This is used by a lot of the register access
	 * pieces to get the correct device-specific windows.
	 */
	ar_pci->sc_vendorid = pci_get_vendor(dev);
	ar_pci->sc_deviceid = pci_get_device(dev);
	if (athp_pci_hw_lookup(ar_pci) != 0) {
		device_printf(dev, "%s: hw lookup failed\n", __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	ar_pci->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (ar_pci->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		err = ENXIO;
		goto bad;
	}

	/* Driver copy; hopefully we can delete this */
	ar->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/* Local copy for bus operations */
	ar_pci->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar_pci->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the HAL is setup are discarded.
	 */
	ar->sc_invalid = 1;

	printf("%s: msicount=%d, msixcount=%d\n",
	    __func__,
	    pci_msi_count(dev),
	    pci_msix_count(dev));

	/*
	 * Arrange interrupt line.
	 *
	 * XXX TODO: this is effectively ath10k_pci_init_irq().
	 * Refactor it out later.
	 *
	 * First - attempt MSI.  If we get it, then use it.
	 */
	i = MSI_NUM_REQUEST;
	if (pci_alloc_msi(dev, &i) == 0) {
		device_printf(dev, "%s: %d MSI interrupts\n", __func__, i);
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
	} else {
		i = 1;
		if (pci_alloc_msi(dev, &i) == 0) {
			device_printf(dev, "%s: 1 MSI interrupt\n", __func__);
			ar_pci->num_msi_intrs = 1;
		} else {
			device_printf(dev, "%s: legacy interrupts\n", __func__);
			ar_pci->num_msi_intrs = 0;
		}
	}
	err = ath10k_pci_request_irq(ar_pci);
	if (err != 0)
		goto bad1;

	/*
	 * Attach register ops - needed for the caller to do register IO.
	 */
	ar->sc_regio.reg_read = athp_pci_regio_read_reg;
	ar->sc_regio.reg_write = athp_pci_regio_write_reg;
	ar->sc_regio.reg_s_read = athp_pci_regio_s_read_reg;
	ar->sc_regio.reg_s_write = athp_pci_regio_s_write_reg;
	ar->sc_regio.reg_flush = athp_pci_regio_flush_reg;
	ar->sc_regio.reg_arg = ar_pci;

	/*
	 * TODO: abstract this out to be a bus/hif specific
	 * attach path.
	 *
	 * I'm not sure what USB/SDIO will look like here, but
	 * I'm pretty sure it won't involve PCI/CE setup.
	 * It'll still have WME/HIF/BMI, but it'll be done over
	 * USB endpoints.
	 */

	if (athp_pci_setup_bufs(ar_pci) != 0) {
		err = ENXIO;
		goto bad4;
	}

	/* HIF ops attach */
	ar->hif.ops = &ath10k_pci_hif_ops;
	ar->hif.bus = ATH10K_BUS_PCI;

	/* Alloc pipes */
	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		device_printf(ar->sc_dev, "%s: pci_alloc_pipes failed: %d\n",
		    __func__,
		    ret);
		/* XXX cleanup */
		err = ENXIO;
		goto bad4;
	}

	/* deinit ce */
	ath10k_pci_ce_deinit(ar);

	/* disable irq */
	ret = ath10k_pci_irq_disable(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: irq_disable failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* init IRQ */
	ret = ath10k_pci_init_irq(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: init_irq failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* Ok, gate open the interrupt handler */
	ar->sc_invalid = 0;

	/* pci_chip_reset */
	ret = ath10k_pci_chip_reset(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: chip_reset failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* read SoC/chip version */
	ar->sc_chipid = athp_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS(ar->sc_regofs));

	/* Verify chip version is something we can use */
	device_printf(ar->sc_dev, "%s: chipid: 0x%08x\n", __func__, ar->sc_chipid);
	if (! ath10k_pci_chip_is_supported(ar_pci->sc_deviceid, ar->sc_chipid)) {
		device_printf(ar->sc_dev,
		    "%s: unsupported chip; chipid: 0x%08x\n", __func__,
		    ar->sc_chipid);
		err = ENXIO;
		goto bad4;
	}

	/* Call main attach method with given info */
	ar->sc_preinit_hook.ich_func = athp_attach_preinit;
	ar->sc_preinit_hook.ich_arg = ar;
	if (config_intrhook_establish(&ar->sc_preinit_hook) != 0) {
		device_printf(ar->sc_dev,
		    "%s: couldn't establish preinit hook\n", __func__);
		goto bad4;
	}

	return (0);

	/* Fallthrough for setup failure */
bad4:
	athp_pci_free_bufs(ar_pci);
	/* Ensure we disable interrupts from the device */
	ath10k_pci_deinit_irq(ar_pci);
	ath10k_pci_free_irq(ar_pci);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, ar_pci->sc_sr);
bad:

	ath10k_htt_rx_free_desc(ar, &ar->htt);

	athp_descdma_free(ar, &ar_pci->sc_bmi_txbuf);
	athp_descdma_free(ar, &ar_pci->sc_bmi_rxbuf);

	/* XXX disable busmaster? */
	mtx_destroy(&ar_pci->ps_mtx);
	mtx_destroy(&ar_pci->ce_mtx);
	mtx_destroy(&ar->sc_conf_mtx);
	mtx_destroy(&ar->sc_data_mtx);
	mtx_destroy(&ar->sc_buf_mtx);
	mtx_destroy(&ar->sc_dma_mtx);
	mtx_destroy(&ar->sc_mtx);
	if (ar_pci->pipe_taskq) {
		taskqueue_drain_all(ar_pci->pipe_taskq);
		taskqueue_free(ar_pci->pipe_taskq);
	}

	/* Shutdown ioctl handler */
	athp_ioctl_teardown(ar);

	ath10k_core_destroy(ar);
bad0:
	return (err);
}
Example #11
static void *
nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
{
	uint8_t			descr[NVME_MODEL_NUMBER_LENGTH+1];
	struct nvd_disk		*ndisk;
	struct disk		*disk;
	struct nvd_controller	*ctrlr = ctrlr_arg;

	ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);

	disk = disk_alloc();
	disk->d_strategy = nvd_strategy;
	disk->d_ioctl = nvd_ioctl;
	disk->d_name = NVD_STR;
	disk->d_drv1 = ndisk;

	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);

	if (TAILQ_EMPTY(&disk_head))
		disk->d_unit = 0;
	else
		disk->d_unit =
		    TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;

	disk->d_flags = 0;

	if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANDELETE;

	if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
	disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif

	/*
	 * d_ident and d_descr are both far bigger than the length of either
	 *  the serial or model number strings.
	 */
	nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
	    sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);

	nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
	    NVME_MODEL_NUMBER_LENGTH);

#if __FreeBSD_version >= 900034
	strlcpy(disk->d_descr, descr, sizeof(descr));
#endif

	ndisk->ns = ns;
	ndisk->disk = disk;
	ndisk->cur_depth = 0;

	mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
	bioq_init(&ndisk->bioq);

	TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
	ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ndisk->tq);
	taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

	TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
	TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);

	disk_create(disk, DISK_VERSION);

	printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
	printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
		(uintmax_t)disk->d_mediasize / (1024*1024),
		(uintmax_t)disk->d_mediasize / disk->d_sectorsize,
		disk->d_sectorsize);

	return (NULL);
}
Example #12
int
create_geom_disk(struct nand_chip *chip)
{
	struct disk *ndisk, *rdisk;

	/* Create the disk device */
	ndisk = disk_alloc();
	ndisk->d_strategy = nand_strategy;
	ndisk->d_ioctl = nand_ioctl;
	ndisk->d_getattr = nand_getattr;
	ndisk->d_name = "gnand";
	ndisk->d_drv1 = chip;
	ndisk->d_maxsize = chip->chip_geom.block_size;
	ndisk->d_sectorsize = chip->chip_geom.page_size;
	ndisk->d_mediasize = chip->chip_geom.chip_size;
	ndisk->d_unit = chip->num +
	    10 * device_get_unit(device_get_parent(chip->dev));

	/*
	 * When using BBT, make two last blocks of device unavailable
	 * to user (because those are used to store BBT table).
	 */
	if (chip->bbt != NULL)
		ndisk->d_mediasize -= (2 * chip->chip_geom.block_size);

	ndisk->d_flags = DISKFLAG_CANDELETE;

	snprintf(ndisk->d_ident, sizeof(ndisk->d_ident),
	    "nand: Man:0x%02x Dev:0x%02x", chip->id.man_id, chip->id.dev_id);
	ndisk->d_rotation_rate = DISK_RR_NON_ROTATING;

	disk_create(ndisk, DISK_VERSION);

	/* Create the RAW disk device */
	rdisk = disk_alloc();
	rdisk->d_strategy = nand_strategy_raw;
	rdisk->d_ioctl = nand_ioctl;
	rdisk->d_getattr = nand_getattr;
	rdisk->d_name = "gnand.raw";
	rdisk->d_drv1 = chip;
	rdisk->d_maxsize = chip->chip_geom.block_size;
	rdisk->d_sectorsize = chip->chip_geom.page_size;
	rdisk->d_mediasize = chip->chip_geom.chip_size;
	rdisk->d_unit = chip->num +
	    10 * device_get_unit(device_get_parent(chip->dev));

	rdisk->d_flags = DISKFLAG_CANDELETE;

	snprintf(rdisk->d_ident, sizeof(rdisk->d_ident),
	    "nand_raw: Man:0x%02x Dev:0x%02x", chip->id.man_id,
	    chip->id.dev_id);
	rdisk->d_rotation_rate = DISK_RR_NON_ROTATING;

	disk_create(rdisk, DISK_VERSION);

	chip->ndisk = ndisk;
	chip->rdisk = rdisk;

	mtx_init(&chip->qlock, "NAND I/O lock", NULL, MTX_DEF);
	bioq_init(&chip->bioq);

	TASK_INIT(&chip->iotask, 0, nand_io_proc, chip);
	chip->tq = taskqueue_create("nand_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &chip->tq);
	taskqueue_start_threads(&chip->tq, 1, PI_DISK, "nand taskq");

	if (bootverbose)
		device_printf(chip->dev, "Created gnand%d for chip [0x%0x, "
		    "0x%0x]\n", ndisk->d_unit, chip->id.man_id,
		    chip->id.dev_id);

	return (0);
}
Example #13
int
smc_attach(device_t dev)
{
	int			type, error;
	uint16_t		val;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct smc_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	error = 0;

	sc->smc_dev = dev;

	ifp = sc->smc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		error = ENOSPC;
		goto done;
	}

	mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	/* Set up watchdog callout. */
	callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0);

	type = SYS_RES_IOPORT;
	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	sc->smc_reg_rid = 0;
	sc->smc_reg = bus_alloc_resource(dev, type, &sc->smc_reg_rid, 0, ~0,
	    16, RF_ACTIVE);
	if (sc->smc_reg == NULL) {
		error = ENXIO;
		goto done;
	}

	sc->smc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 0,
	    ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->smc_irq == NULL) {
		error = ENXIO;
		goto done;
	}

	SMC_LOCK(sc);
	smc_reset(sc);
	SMC_UNLOCK(sc);

	smc_select_bank(sc, 3);
	val = smc_read_2(sc, REV);
	sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	sc->smc_rev = (val & REV_REV_MASK) >> REV_REV_SHIFT;
	if (bootverbose)
		device_printf(dev, "revision %x\n", sc->smc_rev);

	callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx,
	    CALLOUT_RETURNUNLOCKED);
	if (sc->smc_chip >= REV_CHIP_91110FD) {
		(void)mii_attach(dev, &sc->smc_miibus, ifp,
		    smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (sc->smc_miibus != NULL) {
			sc->smc_mii_tick = smc_mii_tick;
			sc->smc_mii_mediachg = smc_mii_mediachg;
			sc->smc_mii_mediaioctl = smc_mii_mediaioctl;
		}
	}

	smc_select_bank(sc, 1);
	eaddr[0] = smc_read_1(sc, IAR0);
	eaddr[1] = smc_read_1(sc, IAR1);
	eaddr[2] = smc_read_1(sc, IAR2);
	eaddr[3] = smc_read_1(sc, IAR3);
	eaddr[4] = smc_read_1(sc, IAR4);
	eaddr[5] = smc_read_1(sc, IAR5);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = smc_init;
	ifp->if_ioctl = smc_ioctl;
	ifp->if_start = smc_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ether_ifattach(ifp, eaddr);

	/* Set up taskqueue */
	TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
	TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
	TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->smc_tq);
	taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->smc_dev));

	/* Mask all interrupts. */
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);

	/* Wire up interrupt */
	error = bus_setup_intr(dev, sc->smc_irq,
	    INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih);
	if (error != 0)
		goto done;

done:
	if (error != 0)
		smc_detach(dev);
	return (error);
}
Example #14
/*
 * Attach/setup the common net80211 state.  Called by
 * the driver on attach, prior to creating any vaps.
 */
void
ieee80211_ifattach(struct ieee80211com *ic,
	const uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct sockaddr_dl *sdl;
	struct ifaddr *ifa;

	KASSERT(ifp->if_type == IFT_IEEE80211, ("if_type %d", ifp->if_type));

	TAILQ_INIT(&ic->ic_vaps);

	/* Create a taskqueue for all state changes */
	ic->ic_tq = taskqueue_create("ic_taskq", M_WAITOK | M_ZERO,
	    taskqueue_thread_enqueue, &ic->ic_tq);
	taskqueue_start_threads(&ic->ic_tq, 1, TDPRI_KERN_DAEMON, -1,
	    "%s taskq", ifp->if_xname);
	/*
	 * Fill in 802.11 available channel set, mark all
	 * available channels as active, and pick a default
	 * channel if not already specified.
	 */
	ieee80211_media_init(ic);

	ic->ic_update_mcast = null_update_mcast;
	ic->ic_update_promisc = null_update_promisc;

	ic->ic_hash_key = karc4random();
	ic->ic_bintval = IEEE80211_BINTVAL_DEFAULT;
	ic->ic_lintval = ic->ic_bintval;
	ic->ic_txpowlimit = IEEE80211_TXPOWER_MAX;

	ieee80211_crypto_attach(ic);
	ieee80211_node_attach(ic);
	ieee80211_power_attach(ic);
	ieee80211_proto_attach(ic);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_superg_attach(ic);
#endif
	ieee80211_ht_attach(ic);
	ieee80211_scan_attach(ic);
	ieee80211_regdomain_attach(ic);
	ieee80211_dfs_attach(ic);

	ieee80211_sysctl_attach(ic);

	ifp->if_addrlen = IEEE80211_ADDR_LEN;
	ifp->if_hdrlen = 0;
	if_attach(ifp, NULL);
	ifp->if_mtu = IEEE80211_MTU_MAX;
	ifp->if_broadcastaddr = ieee80211broadcastaddr;
	ifp->if_output = null_output;
	ifp->if_input = null_input;	/* just in case */
	ifp->if_resolvemulti = NULL;	/* NB: callers check */

	ifa = ifaddr_byindex(ifp->if_index);
	KASSERT(ifa != NULL, ("%s: no lladdr!", __func__));
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	sdl->sdl_type = IFT_ETHER;		/* XXX IFT_IEEE80211? */
	sdl->sdl_alen = IEEE80211_ADDR_LEN;
	IEEE80211_ADDR_COPY(LLADDR(sdl), macaddr);
//	IFAFREE(ifa);
}
Example #15
static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;

	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));

	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	virtio_set_feature_desc(dev, vtblk_feature_desc);
	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;

	/* Get local copy of config. */
	virtio_read_device_config(dev, 0, &blkcfg,
	    sizeof(struct virtio_blk_config));

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	TASK_INIT(&sc->vtblk_intr_task, 0, vtblk_intr_task, sc);
	sc->vtblk_tq = taskqueue_create_fast("vtblk_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vtblk_tq);
	if (sc->vtblk_tq == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate taskqueue\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	taskqueue_start_threads(&sc->vtblk_tq, 1, PI_DISK, "%s taskq",
	    device_get_nameunit(dev));

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

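	/* NB: error is 0 on success, so the detach below runs only on failure. */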
fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}
Example #16
static int
axgbe_attach(device_t dev)
{
	struct axgbe_softc *sc;
	struct ifnet *ifp;
	pcell_t phy_handle;
	device_t phydev;
	phandle_t node, phy_node;
	struct resource *mac_res[11];
	struct resource *phy_res[4];
	ssize_t len;
	int error, i, j;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getencprop(node, "phy-handle", &phy_handle,
	    sizeof(phy_handle)) <= 0) {
		phy_node = node;

		if (bus_alloc_resources(dev, mac_spec, mac_res)) {
			device_printf(dev,
			    "could not allocate phy resources\n");
			return (ENXIO);
		}

		sc->prv.xgmac_res = mac_res[0];
		sc->prv.xpcs_res = mac_res[1];
		sc->prv.rxtx_res = mac_res[2];
		sc->prv.sir0_res = mac_res[3];
		sc->prv.sir1_res = mac_res[4];

		sc->prv.dev_irq_res = mac_res[5];
		sc->prv.per_channel_irq = OF_hasprop(node,
		    XGBE_DMA_IRQS_PROPERTY);
		for (i = 0, j = 6; j < nitems(mac_res) - 1 &&
		    mac_res[j + 1] != NULL; i++, j++) {
			if (sc->prv.per_channel_irq) {
				sc->prv.chan_irq_res[i] = mac_res[j];
			}
		}

		/* The last entry is the auto-negotiation interrupt */
		sc->prv.an_irq_res = mac_res[j];
	} else {
		phydev = OF_device_from_xref(phy_handle);
		phy_node = ofw_bus_get_node(phydev);

		if (bus_alloc_resources(phydev, old_phy_spec, phy_res)) {
			device_printf(dev,
			    "could not allocate phy resources\n");
			return (ENXIO);
		}

		if (bus_alloc_resources(dev, old_mac_spec, mac_res)) {
			device_printf(dev,
			    "could not allocate mac resources\n");
			return (ENXIO);
		}

		sc->prv.rxtx_res = phy_res[0];
		sc->prv.sir0_res = phy_res[1];
		sc->prv.sir1_res = phy_res[2];
		sc->prv.an_irq_res = phy_res[3];

		sc->prv.xgmac_res = mac_res[0];
		sc->prv.xpcs_res = mac_res[1];
		sc->prv.dev_irq_res = mac_res[2];
		sc->prv.per_channel_irq = OF_hasprop(node,
		    XGBE_DMA_IRQS_PROPERTY);
		if (sc->prv.per_channel_irq) {
			for (i = 0, j = 3; i < nitems(sc->prv.chan_irq_res) &&
			    mac_res[j] != NULL; i++, j++) {
				sc->prv.chan_irq_res[i] = mac_res[j];
			}
		}
	}

	if ((len = OF_getproplen(node, "mac-address")) < 0) {
		device_printf(dev, "No mac-address property\n");
		return (EINVAL);
	}

	if (len != ETHER_ADDR_LEN)
		return (EINVAL);

	OF_getprop(node, "mac-address", sc->mac_addr, ETHER_ADDR_LEN);

	sc->prv.netdev = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot alloc ifnet\n");
		return (ENXIO);
	}

	sc->prv.dev = dev;
	sc->prv.dmat = bus_get_dma_tag(dev);
	sc->prv.phy.advertising = ADVERTISED_10000baseKR_Full |
	    ADVERTISED_1000baseKX_Full;
	/*
	 * Read the needed properties from the phy node.
	 */

	/* This is documented as optional, but Linux requires it */
	if (OF_getencprop(phy_node, XGBE_SPEEDSET_PROPERTY, &sc->prv.speed_set,
	    sizeof(sc->prv.speed_set)) <= 0) {
		device_printf(dev, "%s property is missing\n",
		    XGBE_SPEEDSET_PROPERTY);
		return (EINVAL);
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_BLWC_PROPERTY,
	    sc->prv.serdes_blwc, sizeof(sc->prv.serdes_blwc));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_blwc[0] = XGBE_SPEED_1000_BLWC;
		sc->prv.serdes_blwc[1] = XGBE_SPEED_2500_BLWC;
		sc->prv.serdes_blwc[2] = XGBE_SPEED_10000_BLWC;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_CDR_RATE_PROPERTY,
	    sc->prv.serdes_cdr_rate, sizeof(sc->prv.serdes_cdr_rate));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_cdr_rate[0] = XGBE_SPEED_1000_CDR;
		sc->prv.serdes_cdr_rate[1] = XGBE_SPEED_2500_CDR;
		sc->prv.serdes_cdr_rate[2] = XGBE_SPEED_10000_CDR;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_PQ_SKEW_PROPERTY,
	    sc->prv.serdes_pq_skew, sizeof(sc->prv.serdes_pq_skew));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_pq_skew[0] = XGBE_SPEED_1000_PQ;
		sc->prv.serdes_pq_skew[1] = XGBE_SPEED_2500_PQ;
		sc->prv.serdes_pq_skew[2] = XGBE_SPEED_10000_PQ;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_TX_AMP_PROPERTY,
	    sc->prv.serdes_tx_amp, sizeof(sc->prv.serdes_tx_amp));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_tx_amp[0] = XGBE_SPEED_1000_TXAMP;
		sc->prv.serdes_tx_amp[1] = XGBE_SPEED_2500_TXAMP;
		sc->prv.serdes_tx_amp[2] = XGBE_SPEED_10000_TXAMP;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_CFG_PROPERTY,
	    sc->prv.serdes_dfe_tap_cfg, sizeof(sc->prv.serdes_dfe_tap_cfg));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_dfe_tap_cfg[0] = XGBE_SPEED_1000_DFE_TAP_CONFIG;
		sc->prv.serdes_dfe_tap_cfg[1] = XGBE_SPEED_2500_DFE_TAP_CONFIG;
		sc->prv.serdes_dfe_tap_cfg[2] = XGBE_SPEED_10000_DFE_TAP_CONFIG;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_ENA_PROPERTY,
	    sc->prv.serdes_dfe_tap_ena, sizeof(sc->prv.serdes_dfe_tap_ena));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_dfe_tap_ena[0] = XGBE_SPEED_1000_DFE_TAP_ENABLE;
		sc->prv.serdes_dfe_tap_ena[1] = XGBE_SPEED_2500_DFE_TAP_ENABLE;
		sc->prv.serdes_dfe_tap_ena[2] = XGBE_SPEED_10000_DFE_TAP_ENABLE;
	}

	/* Check if the NIC is DMA coherent */
	sc->prv.coherent = OF_hasprop(node, "dma-coherent");
	if (sc->prv.coherent) {
		sc->prv.axdomain = XGBE_DMA_OS_AXDOMAIN;
		sc->prv.arcache = XGBE_DMA_OS_ARCACHE;
		sc->prv.awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		sc->prv.axdomain = XGBE_DMA_SYS_AXDOMAIN;
		sc->prv.arcache = XGBE_DMA_SYS_ARCACHE;
		sc->prv.awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Create the lock & workqueues */
	spin_lock_init(&sc->prv.xpcs_lock);
	sc->prv.dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->prv.dev_workqueue);
	taskqueue_start_threads(&sc->prv.dev_workqueue, 1, PI_NET,
	    "axgbe taskq");

	/* Set the needed pointers */
	xgbe_init_function_ptrs_phy(&sc->prv.phy_if);
	xgbe_init_function_ptrs_dev(&sc->prv.hw_if);
	xgbe_init_function_ptrs_desc(&sc->prv.desc_if);

	/* Reset the hardware */
	sc->prv.hw_if.exit(&sc->prv);

	/* Read the hardware features */
	xgbe_get_all_hw_features(&sc->prv);

	/* Set default values */
	sc->prv.pblx8 = DMA_PBL_X8_ENABLE;
	sc->prv.tx_desc_count = XGBE_TX_DESC_CNT;
	sc->prv.tx_sf_mode = MTL_TSF_ENABLE;
	sc->prv.tx_threshold = MTL_TX_THRESHOLD_64;
	sc->prv.tx_pbl = DMA_PBL_16;
	sc->prv.tx_osp_mode = DMA_OSP_ENABLE;
	sc->prv.rx_desc_count = XGBE_RX_DESC_CNT;
	sc->prv.rx_sf_mode = MTL_RSF_DISABLE;
	sc->prv.rx_threshold = MTL_RX_THRESHOLD_64;
	sc->prv.rx_pbl = DMA_PBL_16;
	sc->prv.pause_autoneg = 1;
	sc->prv.tx_pause = 1;
	sc->prv.rx_pause = 1;
	sc->prv.phy_speed = SPEED_UNKNOWN;
	sc->prv.power_down = 0;

	/* TODO: Limit to min(ncpus, hw rings) */
	sc->prv.tx_ring_count = 1;
	sc->prv.tx_q_count = 1;
	sc->prv.rx_ring_count = 1;
	sc->prv.rx_q_count = sc->prv.hw_feat.rx_q_cnt;

	/* Init the PHY */
	sc->prv.phy_if.phy_init(&sc->prv);

	/* Set the coalescing */
	xgbe_init_rx_coalesce(&sc->prv);
	xgbe_init_tx_coalesce(&sc->prv);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = axgbe_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = axgbe_ioctl;
	ifp->if_transmit = xgbe_xmit;
	ifp->if_qflush = axgbe_qflush;
	ifp->if_get_counter = axgbe_get_counter;

	/* TODO: Support HW offload */
	ifp->if_capabilities = 0;
	ifp->if_capenable = 0;
	ifp->if_hwassist = 0;

	ether_ifattach(ifp, sc->mac_addr);

	ifmedia_init(&sc->media, IFM_IMASK, axgbe_media_change,
	    axgbe_media_status);
#ifdef notyet
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
#endif
	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	set_bit(XGBE_DOWN, &sc->prv.dev_state);

	if (xgbe_open(ifp) < 0) {
		device_printf(dev, "ndo_open failed\n");
		return (ENXIO);
	}

	return (0);
}
Example #17
/**
 * Module/driver initialization.  Creates the network
 * devices.
 *
 * @return Zero on success
 */
int cvm_oct_init_module(device_t bus)
{
	device_t dev;
	int ifnum;
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw(bus);

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_prt_tagx_t pip_prt_tagx;
			int pkind = cvmx_helper_get_ipd_port(interface, port);

			pip_prt_tagx.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pkind));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(pkind), pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	cvm_oct_link_taskq = taskqueue_create("octe link", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_link_taskq);
	taskqueue_start_threads(&cvm_oct_link_taskq, 1, PI_NET,
	    "octe link taskq");

	/* Initialize the FAU used for counting packet buffers that need to be freed */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	ifnum = 0;
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     ifnum++, port++) {
			cvm_oct_private_t *priv;
			struct ifnet *ifp;
			
			dev = BUS_ADD_CHILD(bus, 0, "octe", ifnum);
			if (dev != NULL)
				ifp = if_alloc(IFT_ETHER);
			if (dev == NULL || ifp == NULL) {
				printf("Failed to allocate ethernet device for interface %d port %d\n", interface, port);
				continue;
			}

			/* Initialize the device private structure. */
			device_probe(dev);
			priv = device_get_softc(dev);
			priv->dev = dev;
			priv->ifp = ifp;
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++)
				cvmx_fau_atomic_write32(priv->fau+qos*4, 0);
			TASK_INIT(&priv->link_task, 0, cvm_oct_update_link, priv);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon NPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				priv->init = cvm_oct_xaui_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon LOOP Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->init = cvm_oct_sgmii_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				priv->init = cvm_oct_spi_init;
				priv->uninit = cvm_oct_spi_uninit;
				device_set_desc(dev, "Cavium Octeon SPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon RGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon GMII Ethernet");
				break;
			}

			ifp->if_softc = priv;

			if (!priv->init) {
				printf("octe%d: unsupported device type interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else if (priv->init(ifp) != 0) {
				printf("octe%d: failed to register device for interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else {
				cvm_oct_device[priv->port] = ifp;
				fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/* Set the POW timer rate to give an interrupt at most INTERRUPT_LIMIT times per second */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, cvmx_clock_get_rate(CVMX_CLOCK_CORE)/(INTERRUPT_LIMIT*16*256)<<8);

		/* Enable POW timer interrupt. It will count when there are packets available */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1ful<<24);
	} else {
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	callout_init(&cvm_oct_poll_timer, 1);
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);

	return 0;
}
Example #18
static void *
nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
{
	struct nvd_disk		*ndisk;
	struct disk		*disk;
	struct nvd_controller	*ctrlr = ctrlr_arg;

	ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);

	disk = disk_alloc();
	disk->d_strategy = nvd_strategy;
	disk->d_ioctl = nvd_ioctl;
	disk->d_name = "nvd";
	disk->d_drv1 = ndisk;

	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);

	if (TAILQ_EMPTY(&disk_head))
		disk->d_unit = 0;
	else
		disk->d_unit =
		    TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;

	disk->d_flags = 0;

	if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANDELETE;

	if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
	disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif

	strlcpy(disk->d_ident, nvme_ns_get_serial_number(ns),
	    sizeof(disk->d_ident));

#if __FreeBSD_version >= 900034
	strlcpy(disk->d_descr, nvme_ns_get_model_number(ns),
	    sizeof(disk->d_descr));
#endif

	disk_create(disk, DISK_VERSION);

	ndisk->ns = ns;
	ndisk->disk = disk;
	ndisk->cur_depth = 0;

	mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
	bioq_init(&ndisk->bioq);

	TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
	ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ndisk->tq);
	taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

	TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
	TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);

	return (NULL);
}