Example #1
File: qman.c Project: 2asoft/freebsd
int
qman_attach(device_t dev)
{
	struct qman_softc *sc;
	t_QmParam qp;
	t_Error error;
	t_QmRevisionInfo rev;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	qman_sc = sc;

	if (XX_MallocSmartInit() != E_OK) {
		device_printf(dev, "could not initialize smart allocator.\n");
		return (ENXIO);
	}

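	/* Stay on this CPU while QMan is configured and initialized */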
	sched_pin();

	/* Allocate resources */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &sc->sc_rrid, 0, ~0, QMAN_CCSR_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate memory.\n");
		goto err;
	}

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_ires == NULL) {
		device_printf(dev, "could not allocate error interrupt.\n");
		goto err;
	}

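	/* Software portal resources (qp_sc) are required to configure QMan */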
	if (qp_sc == NULL)
		goto err;

	dpaa_portal_map_registers(qp_sc);

	/* Initialize QMan */
	qp.guestId = NCSW_MASTER_ID;
	qp.baseAddress = rman_get_bushandle(sc->sc_rres);
	qp.swPortalsBaseAddress = rman_get_bushandle(qp_sc->sc_rres[0]);
	qp.liodn = 0;
	qp.totalNumOfFqids = QMAN_MAX_FQIDS;
	qp.fqdMemPartitionId = NCSW_MASTER_ID;
	qp.pfdrMemPartitionId = NCSW_MASTER_ID;
	qp.f_Exception = qman_exception;
	qp.h_App = sc;
	qp.errIrq = (int)sc->sc_ires;
	qp.partFqidBase = QMAN_FQID_BASE;
	qp.partNumOfFqids = QMAN_MAX_FQIDS;
	qp.partCgsBase = 0;
	qp.partNumOfCgs = 0;

	sc->sc_qh = QM_Config(&qp);
	if (sc->sc_qh == NULL) {
		device_printf(dev, "could not be configured\n");
		goto err;
	}

	error = QM_Init(sc->sc_qh);
	if (error != E_OK) {
		device_printf(dev, "could not be initialized\n");
		goto err;
	}

	error = QM_GetRevision(sc->sc_qh, &rev);
	if (error != E_OK) {
		device_printf(dev, "could not get QMan revision\n");
		goto err;
	}

	device_printf(dev, "Hardware version: %d.%d.\n",
	    rev.majorRev, rev.minorRev);

	sched_unpin();

	qman_portal_setup(sc);

	return (0);

err:
	sched_unpin();
	qman_detach(dev);
	return (ENXIO);
}
Example #2
File: if_dtsec.c Project: 2asoft/freebsd
int
dtsec_attach(device_t dev)
{
	struct dtsec_softc *sc;
	int error;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	sc->sc_dev = dev;
	sc->sc_mac_mdio_irq = NO_IRQ;
	sc->sc_eth_id = device_get_unit(dev);

	/* Check if MallocSmart allocator is ready */
	if (XX_MallocSmartInit() != E_OK)
		return (ENXIO);

	XX_TrackInit();

	/* Init locks */
	mtx_init(&sc->sc_lock, device_get_nameunit(dev),
	    "DTSEC Global Lock", MTX_DEF);

	mtx_init(&sc->sc_mii_lock, device_get_nameunit(dev),
	    "DTSEC MII Lock", MTX_DEF);

	/* Init callouts */
	callout_init(&sc->sc_tick_callout, CALLOUT_MPSAFE);

	/* Read configuration */
	if ((error = fman_get_handle(&sc->sc_fmh)) != 0)
		return (error);

	if ((error = fman_get_muram_handle(&sc->sc_muramh)) != 0)
		return (error);

	if ((error = fman_get_bushandle(&sc->sc_fm_base)) != 0)
		return (error);

	/* Configure working mode */
	dtsec_configure_mode(sc);

	/* If we are working in regular mode, configure BMAN and QMAN */
	if (sc->sc_mode == DTSEC_MODE_REGULAR) {
		/* Create RX buffer pool */
		error = dtsec_rm_pool_rx_init(sc);
		if (error != 0)
			return (EIO);

		/* Create RX frame queue range */
		error = dtsec_rm_fqr_rx_init(sc);
		if (error != 0)
			return (EIO);

		/* Create frame info pool */
		error = dtsec_rm_fi_pool_init(sc);
		if (error != 0)
			return (EIO);

		/* Create TX frame queue range */
		error = dtsec_rm_fqr_tx_init(sc);
		if (error != 0)
			return (EIO);
	}

	/* Init FMan MAC module. */
	error = dtsec_fm_mac_init(sc, sc->sc_mac_addr);
	if (error != 0) {
		dtsec_detach(dev);
		return (ENXIO);
	}

	/*
	 * XXX: All PHYs are connected to the MDIO interface of the first dTSEC
	 * device (dTSEC0), so we have to save a handle to the FM_MAC instance
	 * of dTSEC0, which is used later for PHY register accesses. Another
	 * option would be adding a new DTS property pointing to the dTSEC
	 * instance whose FM_MAC handle should be used for PHY register
	 * accesses. We did not want to add new properties to the DTS, hence
	 * this rather ugly hack.
	 */
	if (sc->sc_eth_id == 0)
		dtsec_mdio_mac_handle = sc->sc_mach;
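
	/* Hidden MACs are not exposed to the network stack */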
	if (sc->sc_hidden)
		return (0);

	/* Init FMan TX port */
	error = sc->sc_port_tx_init(sc, device_get_unit(sc->sc_dev));
	if (error != 0) {
		dtsec_detach(dev);
		return (ENXIO);
	}

	/* Init FMan RX port */
	error = sc->sc_port_rx_init(sc, device_get_unit(sc->sc_dev));
	if (error != 0) {
		dtsec_detach(dev);
		return (ENXIO);
	}

	/* Create network interface for upper layers */
	ifp = sc->sc_ifnet = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "if_alloc() failed.\n");
		dtsec_detach(dev);
		return (ENOMEM);
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;	/* TODO: Configure */
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_init = dtsec_if_init;
	ifp->if_start = dtsec_if_start;
	ifp->if_ioctl = dtsec_if_ioctl;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

	if (sc->sc_phy_addr >= 0)
		if_initname(ifp, device_get_name(sc->sc_dev),
		    device_get_unit(sc->sc_dev));
	else
		if_initname(ifp, "dtsec_phy", device_get_unit(sc->sc_dev));

	/* TODO */
#if 0
	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);
#endif
	ifp->if_capabilities = 0; /* TODO: Check */
	ifp->if_capenable = ifp->if_capabilities;

	/* Attach PHY(s) */
	error = mii_attach(sc->sc_dev, &sc->sc_mii_dev, ifp, dtsec_ifmedia_upd,
	    dtsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->sc_phy_addr,
	    MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(sc->sc_dev, "attaching PHYs failed: %d\n", error);
		dtsec_detach(sc->sc_dev);
		return (error);
	}
	sc->sc_mii = device_get_softc(sc->sc_mii_dev);

	/* Attach to stack */
	ether_ifattach(ifp, sc->sc_mac_addr);

	return (0);
}
Example #3
int
dpaa_portal_alloc_res(device_t dev, struct dpaa_portals_devinfo *di, int cpu)
{
	struct dpaa_portals_softc *sc = device_get_softc(dev);
	struct resource_list_entry *rle;
	int err;
	struct resource_list *res;

	/* Check if MallocSmart allocator is ready */
	if (XX_MallocSmartInit() != E_OK)
		return (ENXIO);

	res = &di->di_res;

	/*
	 * Allocate memory.
	 * Reserve only one pair of CE/CI virtual memory regions
	 * for all CPUs, in order to save the space.
	 */
	if (sc->sc_rres[0] == NULL) {
		/* Cache enabled area */
		rle = resource_list_find(res, SYS_RES_MEMORY, 0);
		sc->sc_rrid[0] = 0;
		sc->sc_rres[0] = bus_alloc_resource(dev,
		    SYS_RES_MEMORY, &sc->sc_rrid[0], rle->start + sc->sc_dp_pa,
		    rle->end + sc->sc_dp_pa, rle->count, RF_ACTIVE);
		if (sc->sc_rres[0] == NULL) {
			device_printf(dev,
			    "Could not allocate cache enabled memory.\n");
			return (ENXIO);
		}
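		/* Wire the cache-enabled region with a TLB1 entry */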
		tlb1_set_entry(rman_get_bushandle(sc->sc_rres[0]),
		    rle->start + sc->sc_dp_pa, rle->count, _TLB_ENTRY_MEM);
		/* Cache inhibited area */
		rle = resource_list_find(res, SYS_RES_MEMORY, 1);
		sc->sc_rrid[1] = 1;
		sc->sc_rres[1] = bus_alloc_resource(dev,
		    SYS_RES_MEMORY, &sc->sc_rrid[1], rle->start + sc->sc_dp_pa,
		    rle->end + sc->sc_dp_pa, rle->count, RF_ACTIVE);
		if (sc->sc_rres[1] == NULL) {
			device_printf(dev,
			    "Could not allocate cache inhibited memory.\n");
			bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->sc_rrid[0], sc->sc_rres[0]);
			return (ENXIO);
		}
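		/* Wire the cache-inhibited region with a TLB1 entry */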
		tlb1_set_entry(rman_get_bushandle(sc->sc_rres[1]),
		    rle->start + sc->sc_dp_pa, rle->count, _TLB_ENTRY_IO);
		sc->sc_dp[cpu].dp_regs_mapped = 1;
	}
	/* Acquire portal's CE_PA and CI_PA */
	rle = resource_list_find(res, SYS_RES_MEMORY, 0);
	sc->sc_dp[cpu].dp_ce_pa = rle->start + sc->sc_dp_pa;
	sc->sc_dp[cpu].dp_ce_size = rle->count;
	rle = resource_list_find(res, SYS_RES_MEMORY, 1);
	sc->sc_dp[cpu].dp_ci_pa = rle->start + sc->sc_dp_pa;
	sc->sc_dp[cpu].dp_ci_size = rle->count;

	/* Allocate interrupts */
	rle = resource_list_find(res, SYS_RES_IRQ, 0);
	sc->sc_dp[cpu].dp_irid = 0;
	sc->sc_dp[cpu].dp_ires = bus_alloc_resource(dev,
	    SYS_RES_IRQ, &sc->sc_dp[cpu].dp_irid, rle->start, rle->end,
	    rle->count, RF_ACTIVE);
	/* Save interrupt number for later use */
	sc->sc_dp[cpu].dp_intr_num = rle->start;

	if (sc->sc_dp[cpu].dp_ires == NULL) {
		device_printf(dev, "Could not allocate irq.\n");
		return (ENXIO);
	}

	err = XX_PreallocAndBindIntr((int)sc->sc_dp[cpu].dp_ires, cpu);

	if (err != E_OK) {
		device_printf(dev, "Could not prealloc and bind interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_dp[cpu].dp_irid, sc->sc_dp[cpu].dp_ires);
		sc->sc_dp[cpu].dp_ires = NULL;
		return (ENXIO);
	}

#if 0
	err = bus_generic_config_intr(dev, rle->start, di->di_intr_trig,
	    di->di_intr_pol);
	if (err != 0) {
		device_printf(dev, "Could not configure interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_dp[cpu].dp_irid, sc->sc_dp[cpu].dp_ires);
		sc->sc_dp[cpu].dp_ires = NULL;
		return (err);
	}
#endif

	return (0);
}
Example #4
int
bman_attach(device_t dev)
{
	struct bman_softc *sc;
	t_BmRevisionInfo rev;
	t_Error error;
	t_BmParam bp;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	bman_sc = sc;

	/* Check if MallocSmart allocator is ready */
	if (XX_MallocSmartInit() != E_OK)
		return (ENXIO);

	/* Allocate resources */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &sc->sc_rrid, 0, ~0, BMAN_CCSR_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL)
		return (ENXIO);

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ,
	    &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_ires == NULL)
		goto err;

	/* Initialize BMAN */
	memset(&bp, 0, sizeof(bp));
	bp.guestId = NCSW_MASTER_ID;
	bp.baseAddress = rman_get_bushandle(sc->sc_rres);
	bp.totalNumOfBuffers = BMAN_MAX_BUFFERS;
	bp.f_Exception = bman_exception;
	bp.h_App = sc;
	bp.errIrq = (uintptr_t)sc->sc_ires;
	bp.partBpidBase = 0;
	bp.partNumOfPools = BM_MAX_NUM_OF_POOLS;

	sc->sc_bh = BM_Config(&bp);
	if (sc->sc_bh == NULL)
		goto err;

	/* Warn if fewer than 5% of the FBPRs in the pool are free */
	error = BM_ConfigFbprThreshold(sc->sc_bh, (BMAN_MAX_BUFFERS / 8) / 20);
	if (error != E_OK)
		goto err;

	error = BM_Init(sc->sc_bh);
	if (error != E_OK)
		goto err;

	error = BM_GetRevision(sc->sc_bh, &rev);
	if (error != E_OK)
		goto err;

	device_printf(dev, "Hardware version: %d.%d.\n",
	    rev.majorRev, rev.minorRev);

	return (0);

err:
	bman_detach(dev);
	return (ENXIO);
}