Example #1
/*
 * Initialize the mbuf allocator.
 */
void
mbinit(void)
{
	int i;

#if DIAGNOSTIC
	if (mclsizes[0] != MCLBYTES)
		panic("mbinit: the smallest cluster size != MCLBYTES");
	if (mclsizes[nitems(mclsizes) - 1] != MAXMCLBYTES)
		panic("mbinit: the largest cluster size != MAXMCLBYTES");
#endif

	pool_init(&mbpool, MSIZE, 0, 0, 0, "mbufpl", NULL);
	pool_setipl(&mbpool, IPL_NET);
	pool_set_constraints(&mbpool, &kp_dma_contig);
	pool_setlowat(&mbpool, mblowat);

	pool_init(&mtagpool, PACKET_TAG_MAXSIZE + sizeof(struct m_tag),
	    0, 0, 0, "mtagpl", NULL);
	pool_setipl(&mtagpool, IPL_NET);

	for (i = 0; i < nitems(mclsizes); i++) {
		snprintf(mclnames[i], sizeof(mclnames[0]), "mcl%dk",
		    mclsizes[i] >> 10);
		pool_init(&mclpools[i], mclsizes[i], 0, 0, 0,
		    mclnames[i], NULL);
		pool_setipl(&mclpools[i], IPL_NET);
		pool_set_constraints(&mclpools[i], &kp_dma_contig);
		pool_setlowat(&mclpools[i], mcllowat);
	}

	nmbclust_update();
}
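
A minimal sketch (not in the source) of how the cluster pools initialized above are typically picked at allocation time: scan mclsizes[] for the smallest cluster that fits the requested length and pool_get() from the matching entry of mclpools[]. The helper name m_clpool_find() is hypothetical; mclsizes, mclpools and nitems() are the arrays and macro used by mbinit() itself.

/*
 * Hypothetical helper: return the smallest cluster pool that can
 * hold "len" bytes, mirroring the mclpools[] layout built above.
 * Returns NULL if len exceeds MAXMCLBYTES, the largest cluster.
 */
static struct pool *
m_clpool_find(u_int len)
{
	int i;

	for (i = 0; i < nitems(mclsizes); i++) {
		if (len <= mclsizes[i])
			return (&mclpools[i]);
	}
	return (NULL);
}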
Example #2
void
dma_alloc_init(void)
{
	int i;

	for (i = 0; i < nitems(dmapools); i++) {
		snprintf(dmanames[i], sizeof(dmanames[0]), "dma%d",
		    1 << (i + DMA_BUCKET_OFFSET));
		pool_init(&dmapools[i], 1 << (i + DMA_BUCKET_OFFSET), 0, 0, 0,
		    dmanames[i], NULL);
		pool_set_constraints(&dmapools[i], &kp_dma_contig);
		pool_setipl(&dmapools[i], IPL_VM);
		/* XXX need pool_setlowat(&dmapools[i], dmalowat); */
	}
}
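
A sketch, under stated assumptions, of the front end these bucket pools serve: round the request up to the next power-of-two bucket and pool_get() from it. OpenBSD does provide dma_alloc()/dma_free() on top of dmapools[], but the body below is illustrative rather than the in-tree implementation, and dma_alloc_sketch() is a made-up name.

/*
 * Illustrative bucket lookup: dmapools[i] holds objects of size
 * 1 << (i + DMA_BUCKET_OFFSET), exactly as dma_alloc_init() set
 * them up above.
 */
void *
dma_alloc_sketch(size_t size, int flags)
{
	int i;

	for (i = 0; i < nitems(dmapools); i++) {
		if (size <= (size_t)(1 << (i + DMA_BUCKET_OFFSET)))
			return (pool_get(&dmapools[i], flags));
	}
	return (NULL);	/* larger than the biggest bucket */
}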
Example #3
void
vnet_attach(struct device *parent, struct device *self, void *aux)
{
	struct vnet_softc *sc = (struct vnet_softc *)self;
	struct cbus_attach_args *ca = aux;
	struct ldc_conn *lc;
	struct ifnet *ifp;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;
	sc->sc_tx_ino = ca->ca_tx_ino;
	sc->sc_rx_ino = ca->ca_rx_ino;

	printf(": ivec 0x%llx, 0x%llx", sc->sc_tx_ino, sc->sc_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_tx_ino,
	    IPL_NET, BUS_INTR_ESTABLISH_MPSAFE, vnet_tx_intr,
	    sc, sc->sc_dv.dv_xname);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, sc->sc_rx_ino,
	    IPL_NET, BUS_INTR_ESTABLISH_MPSAFE, vnet_rx_intr,
	    sc, sc->sc_dv.dv_xname);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vnet_ldc_reset;
	lc->lc_start = vnet_ldc_start;
	lc->lc_rx_data = vio_rx_data;

	timeout_set(&sc->sc_handshake_to, vnet_handshake, sc);
	sc->sc_peer_state = VIO_DP_STOPPED;

	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VNET_TX_ENTRIES);
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}

	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VNET_RX_ENTRIES);
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

	if (OF_getprop(ca->ca_node, "local-mac-address",
	    sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) > 0)
		printf(", address %s", ether_sprintf(sc->sc_ac.ac_enaddr));

	/*
	 * Each interface gets its own pool.
	 */
	pool_init(&sc->sc_pool, 2048, 0, 0, 0, sc->sc_dv.dv_xname, NULL);
	pool_setipl(&sc->sc_pool, IPL_NET);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_link_state = LINK_STATE_DOWN;
	ifp->if_ioctl = vnet_ioctl;
	ifp->if_start = vnet_start;
	ifp->if_watchdog = vnet_watchdog;
	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 31); /* XXX */
	IFQ_SET_READY(&ifp->if_snd);

	ifmedia_init(&sc->sc_media, 0, vnet_media_change, vnet_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	printf("\n");
	return;

free_txqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
}
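
For context, a minimal sketch (invented, not the driver's real transmit path) of how the per-interface pool created in vnet_attach() might be consumed: take a fixed 2048-byte buffer with pool_get(), copy the outgoing mbuf chain into it with m_copydata(), and pool_put() it back once done. vnet_tx_sketch() is a hypothetical name.

/*
 * Illustrative only: one allocate/copy/free round trip against
 * sc->sc_pool, whose 2048-byte item size was chosen above.
 */
void
vnet_tx_sketch(struct vnet_softc *sc, struct mbuf *m)
{
	caddr_t buf;

	if (m->m_pkthdr.len > 2048)
		return;		/* doesn't fit a pool item */
	buf = pool_get(&sc->sc_pool, PR_NOWAIT);
	if (buf == NULL)
		return;		/* drop: pool exhausted */
	m_copydata(m, 0, m->m_pkthdr.len, buf);
	/* ... hand "buf" to the LDC tx descriptor ring here ... */
	pool_put(&sc->sc_pool, buf);
}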
Example #4
/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	u_int64_t dmapages;

	dmapages = uvm_pagecount(&dma_constraint);
	/* take away a guess at how much of this the kernel will consume */
	dmapages -= (atop(physmem) - atop(uvmexp.free));

	/*
	 * If MD code doesn't say otherwise, use up to 10% of DMA'able
	 * memory for buffers.
	 */
	if (bufcachepercent == 0)
		bufcachepercent = 10;

	/*
	 * XXX these values and their same use in kern_sysctl
	 * need to move into buf.h
	 */
	KASSERT(bufcachepercent <= 90);
	KASSERT(bufcachepercent >= 5);
	if (bufpages == 0)
		bufpages = dmapages * bufcachepercent / 100;
	if (bufpages < BCACHE_MIN)
		bufpages = BCACHE_MIN;
	KASSERT(bufpages < dmapages);

	bufhighpages = bufpages;

	/*
	 * Set the base backoff level for the buffer cache.  We will
	 * not allow uvm to steal back more than this number of pages.
	 */
	buflowpages = dmapages * 5 / 100;
	if (buflowpages < BCACHE_MIN)
		buflowpages = BCACHE_MIN;

	/*
	 * Set bufbackpages to 100 pages, or 10 percent of the low water mark
	 * if we don't have that many pages.
	 */

	bufbackpages = buflowpages * 10 / 100;
	if (bufbackpages > 100)
		bufbackpages = 100;

	/*
	 * If the MD code does not say otherwise, reserve 10% of kva
	 * space for mapping buffers.
	 */
	if (bufkvm == 0)
		bufkvm = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 10;

	/*
	 * Don't use more than twice the amount of bufpages for mappings.
	 * It's twice since we map things sparsely.
	 */
	if (bufkvm > bufpages * PAGE_SIZE)
		bufkvm = bufpages * PAGE_SIZE;
	/*
	 * Round bufkvm down to a multiple of MAXPHYS because we allocate
	 * va space in MAXPHYS-sized chunks.
	 */
	bufkvm &= ~(MAXPHYS - 1);

	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
	pool_setipl(&bufpool, IPL_BIO);

	bufcache_init();

	/*
	 * hmm - bufkvm is an argument because it's static, while
	 * bufpages is global because it can change while running.
	 */
	buf_mem_init(bufkvm);

	/*
	 * Set the dirty page high water mark to be less than the low
	 * water mark for pages in the buffer cache. This ensures we
	 * can always back off by throwing away clean pages, and give
	 * ourselves a chance to write out the dirty pages eventually.
	 */
	hidirtypages = (buflowpages / 4) * 3;
	lodirtypages = buflowpages / 2;

	/*
	 * We are allowed to use up to the reserve.
	 */
	targetpages = bufpages - RESERVE_PAGES;
}
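
To make the sizing arithmetic above concrete, a worked example with invented numbers: 4 KB pages, about 1 GB of DMA'able memory left after subtracting the kernel's share, and the default bufcachepercent of 10.

/*
 * Hypothetical figures only; dmapages = 262144 (1 GB / 4 KB) is assumed.
 *
 *   bufpages     = 262144 * 10 / 100          = 26214  (the cache)
 *   buflowpages  = 262144 *  5 / 100          = 13107  (backoff floor)
 *   bufbackpages = min(13107 * 10 / 100, 100) = 100
 *   hidirtypages = (13107 / 4) * 3            = 9828
 *   lodirtypages = 13107 / 2                  = 6553
 */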