Example 1
/*
 * Extract uma(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the level of work for a list that starts empty, we keep around a
 * hint as to whether it was empty when we began, so we can avoid searching
 * the list for entries to update.  Updates are O(n^2) due to searching for
 * each entry before adding it.
 */
int
memstat_sysctl_uma(struct memory_type_list *list, int flags)
{
	struct uma_stream_header *ushp;
	struct uma_type_header *uthp;
	struct uma_percpu_stat *upsp;
	struct memory_type *mtp;
	int count, hint_dontsearch, i, j, maxcpus, maxid;
	char *buffer, *p;
	size_t size;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	/*
	 * Query the maximum CPU ID and the number of UMA zones so that we
	 * can guess an initial buffer size.  We loop until we succeed or
	 * really fail.  Note that the maxcpus value used when processing
	 * the real data is not the one queried here -- it is read from the
	 * stream header.
	 */
retry:
	size = sizeof(maxid);
	if (sysctlbyname("kern.smp.maxid", &maxid, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
	if (size != sizeof(maxid)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	size = sizeof(count);
	if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}
	if (size != sizeof(count)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	/*
	 * Space for the stream header, plus a type header and per-CPU
	 * stats for each zone.
	 */
	size = sizeof(*ushp) + count * (sizeof(*uthp) + sizeof(*upsp) *
	    (maxid + 1));

	buffer = malloc(size);
	if (buffer == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}

	if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) {
		/*
		 * XXXRW: ENOMEM is an ambiguous return; we should bound the
		 * number of loops, perhaps.
		 */
		if (errno == ENOMEM) {
			free(buffer);
			goto retry;
		}
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	if (size == 0) {
		free(buffer);
		return (0);
	}

	if (size < sizeof(*ushp)) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}
	p = buffer;
	ushp = (struct uma_stream_header *)p;
	p += sizeof(*ushp);

	if (ushp->ush_version != UMA_STREAM_VERSION) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
	 * a matching version and acceptable CPU count.
	 */
	maxcpus = ushp->ush_maxcpus;
	count = ushp->ush_count;
	for (i = 0; i < count; i++) {
		uthp = (struct uma_type_header *)p;
		p += sizeof(*uthp);

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
			    uthp->uth_name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
			    uthp->uth_name, maxid + 1);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(buffer);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * Reset the statistics on the current node.
		 */
		_memstat_mt_reset_stats(mtp, maxid + 1);

		mtp->mt_numallocs = uthp->uth_allocs;
		mtp->mt_numfrees = uthp->uth_frees;
		mtp->mt_failures = uthp->uth_fails;
		mtp->mt_sleeps = uthp->uth_sleeps;

		for (j = 0; j < maxcpus; j++) {
			upsp = (struct uma_percpu_stat *)p;
			p += sizeof(*upsp);

			mtp->mt_percpu_cache[j].mtp_free =
			    upsp->ups_cache_free;
			mtp->mt_free += upsp->ups_cache_free;
			mtp->mt_numallocs += upsp->ups_allocs;
			mtp->mt_numfrees += upsp->ups_frees;
		}

		mtp->mt_size = uthp->uth_size;
		mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size;
		mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size;
		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_countlimit = uthp->uth_limit;
		mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size;

		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
		mtp->mt_zonefree = uthp->uth_zone_free;

		/*
		 * UMA secondary zones share a keg with the primary zone.  To
		 * avoid double-reporting of free items, report keg free
		 * items only in the primary zone.
		 */
		if (!(uthp->uth_zone_flags & UTH_ZONE_SECONDARY)) {
			mtp->mt_kegfree = uthp->uth_keg_free;
			mtp->mt_free += mtp->mt_kegfree;
		}
		mtp->mt_free += mtp->mt_zonefree;
	}

	free(buffer);

	return (0);
}
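
A minimal caller for the function above might look like the sketch below. It relies only on the public libmemstat(3) interface -- memstat_mtl_alloc(), the memstat_mtl_first()/memstat_mtl_next() iterators, and the memstat_get_*() accessors -- and the one-zone-per-line output format is purely illustrative.

#include <sys/types.h>

#include <err.h>
#include <memstat.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct memory_type_list *mtlp;
	struct memory_type *mtp;

	mtlp = memstat_mtl_alloc();
	if (mtlp == NULL)
		err(1, "memstat_mtl_alloc");

	/* On failure, the error code is stored in the list, not in errno. */
	if (memstat_sysctl_uma(mtlp, 0) < 0)
		errx(1, "memstat_sysctl_uma: %s",
		    memstat_strerror(memstat_mtl_geterror(mtlp)));

	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
	    mtp = memstat_mtl_next(mtp))
		printf("%-26s count: %ju free: %ju\n",
		    memstat_get_name(mtp),
		    (uintmax_t)memstat_get_count(mtp),
		    (uintmax_t)memstat_get_free(mtp));

	memstat_mtl_free(mtlp);
	return (0);
}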
Example 2
int
memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
{
	struct memory_type *mtp;
	void *kmemstatistics;
	int hint_dontsearch, j, mp_maxcpus, ret;
	char name[MEMTYPE_MAXNAME];
	struct malloc_type_stats *mts, *mtsp;
	struct malloc_type_internal *mtip;
	struct malloc_type type, *typep;
	kvm_t *kvm;

	kvm = (kvm_t *)kvm_handle;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	if (kvm_nlist(kvm, namelist) != 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM;
		return (-1);
	}

	if (namelist[X_KMEMSTATISTICS].n_type == 0 ||
	    namelist[X_KMEMSTATISTICS].n_value == 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
		return (-1);
	}

	ret = kread_symbol(kvm, X_MP_MAXCPUS, &mp_maxcpus,
	    sizeof(mp_maxcpus), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}

	ret = kread_symbol(kvm, X_KMEMSTATISTICS, &kmemstatistics,
	    sizeof(kmemstatistics), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}

	mts = malloc(sizeof(struct malloc_type_stats) * mp_maxcpus);
	if (mts == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}

	for (typep = kmemstatistics; typep != NULL; typep = type.ks_next) {
		ret = kread(kvm, typep, &type, sizeof(type), 0);
		if (ret != 0) {
			_memstat_mtl_empty(list);
			free(mts);
			list->mtl_error = ret;
			return (-1);
		}
		ret = kread_string(kvm, (void *)type.ks_shortdesc, name,
		    MEMTYPE_MAXNAME);
		if (ret != 0) {
			_memstat_mtl_empty(list);
			free(mts);
			list->mtl_error = ret;
			return (-1);
		}

		/*
		 * Since our compile-time value for MAXCPU may differ from the
		 * kernel's, we populate our own array.
		 */
		mtip = type.ks_handle;
		ret = kread(kvm, mtip->mti_stats, mts, mp_maxcpus *
		    sizeof(struct malloc_type_stats), 0);
		if (ret != 0) {
			_memstat_mtl_empty(list);
			free(mts);
			list->mtl_error = ret;
			return (-1);
		}

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC, name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
			    name, mp_maxcpus);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(mts);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * This logic is replicated from kern_malloc.c, and should
		 * be kept in sync.
		 */
		_memstat_mt_reset_stats(mtp, mp_maxcpus);
		for (j = 0; j < mp_maxcpus; j++) {
			mtsp = &mts[j];
			mtp->mt_memalloced += mtsp->mts_memalloced;
			mtp->mt_memfreed += mtsp->mts_memfreed;
			mtp->mt_numallocs += mtsp->mts_numallocs;
			mtp->mt_numfrees += mtsp->mts_numfrees;
			mtp->mt_sizemask |= mtsp->mts_size;

			mtp->mt_percpu_alloc[j].mtp_memalloced =
			    mtsp->mts_memalloced;
			mtp->mt_percpu_alloc[j].mtp_memfreed =
			    mtsp->mts_memfreed;
			mtp->mt_percpu_alloc[j].mtp_numallocs =
			    mtsp->mts_numallocs;
			mtp->mt_percpu_alloc[j].mtp_numfrees =
			    mtsp->mts_numfrees;
			mtp->mt_percpu_alloc[j].mtp_sizemask =
			    mtsp->mts_size;
		}

		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
	}

	free(mts);

	return (0);
}
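
The kvm-backed reader above takes an opaque handle that is normally obtained with kvm_openfiles(3). A hedged sketch of that path follows; passing NULL file names opens the running kernel, argv[1] may name a crash dump, and the MEMSTAT_ERROR_KVM special case mirrors the convention used by mbpr() in Example 4.

#include <sys/types.h>

#include <err.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <memstat.h>
#include <stdio.h>

int
main(int argc, char *argv[])
{
	struct memory_type_list *mtlp;
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kvmd;

	/* NULL names mean the running kernel; argv[1] may be a core file. */
	kvmd = kvm_openfiles(NULL, argc > 1 ? argv[1] : NULL, NULL, O_RDONLY,
	    errbuf);
	if (kvmd == NULL)
		errx(1, "kvm_openfiles: %s", errbuf);

	mtlp = memstat_mtl_alloc();
	if (mtlp == NULL)
		err(1, "memstat_mtl_alloc");

	if (memstat_kvm_malloc(mtlp, kvmd) < 0) {
		/* MEMSTAT_ERROR_KVM carries its detail in kvm_geterr(). */
		if (memstat_mtl_geterror(mtlp) == MEMSTAT_ERROR_KVM)
			errx(1, "memstat_kvm_malloc: %s", kvm_geterr(kvmd));
		errx(1, "memstat_kvm_malloc: %s",
		    memstat_strerror(memstat_mtl_geterror(mtlp)));
	}

	printf("malloc(9) statistics read via kvm\n");
	memstat_mtl_free(mtlp);
	kvm_close(kvmd);
	return (0);
}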
Example 3
/*
 * Extract malloc(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the level of work for a list that starts empty, we keep around a
 * hint as to whether it was empty when we began, so we can avoid searching
 * the list for entries to update.  Updates are O(n^2) due to searching for
 * each entry before adding it.
 */
int
memstat_sysctl_malloc(struct memory_type_list *list, int flags)
{
	struct malloc_type_stream_header *mtshp;
	struct malloc_type_header *mthp;
	struct malloc_type_stats *mtsp;
	struct memory_type *mtp;
	int count, hint_dontsearch, i, j, maxcpus;
	char *buffer, *p;
	size_t size;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	/*
	 * Query the number of CPUs and the number of malloc types so that
	 * we can guess an initial buffer size.  We loop until we succeed
	 * or really fail.  Note that the value of maxcpus we query using
	 * sysctl is not the version we use when processing the real data
	 * -- that is read from the stream header.
	 */
retry:
	size = sizeof(maxcpus);
	if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
	if (size != sizeof(maxcpus)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	size = sizeof(count);
	if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}
	if (size != sizeof(count)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	/*
	 * Space for the stream header, plus a type header and per-CPU
	 * stats for each malloc type.
	 */
	size = sizeof(*mtshp) + count * (sizeof(*mthp) + sizeof(*mtsp) *
	    maxcpus);

	buffer = malloc(size);
	if (buffer == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}

	if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
		/*
		 * XXXRW: ENOMEM is an ambiguous return; we should bound the
		 * number of loops, perhaps.
		 */
		if (errno == ENOMEM) {
			free(buffer);
			goto retry;
		}
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	if (size == 0) {
		free(buffer);
		return (0);
	}

	if (size < sizeof(*mtshp)) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}
	p = buffer;
	mtshp = (struct malloc_type_stream_header *)p;
	p += sizeof(*mtshp);

	if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
	 * a matching version and acceptable CPU count.
	 */
	maxcpus = mtshp->mtsh_maxcpus;
	count = mtshp->mtsh_count;
	for (i = 0; i < count; i++) {
		mthp = (struct malloc_type_header *)p;
		p += sizeof(*mthp);

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
			    mthp->mth_name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
			    mthp->mth_name, maxcpus);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(buffer);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * Reset the statistics on the current node.
		 */
		_memstat_mt_reset_stats(mtp, maxcpus);

		for (j = 0; j < maxcpus; j++) {
			mtsp = (struct malloc_type_stats *)p;
			p += sizeof(*mtsp);

			/*
			 * Summarize raw statistics across CPUs into coalesced
			 * statistics.
			 */
			mtp->mt_memalloced += mtsp->mts_memalloced;
			mtp->mt_memfreed += mtsp->mts_memfreed;
			mtp->mt_numallocs += mtsp->mts_numallocs;
			mtp->mt_numfrees += mtsp->mts_numfrees;
			mtp->mt_sizemask |= mtsp->mts_size;

			/*
			 * Copies of per-CPU statistics.
			 */
			mtp->mt_percpu_alloc[j].mtp_memalloced =
			    mtsp->mts_memalloced;
			mtp->mt_percpu_alloc[j].mtp_memfreed =
			    mtsp->mts_memfreed;
			mtp->mt_percpu_alloc[j].mtp_numallocs =
			    mtsp->mts_numallocs;
			mtp->mt_percpu_alloc[j].mtp_numfrees =
			    mtsp->mts_numfrees;
			mtp->mt_percpu_alloc[j].mtp_sizemask =
			    mtsp->mts_size;
		}

		/*
		 * Derived cross-CPU statistics.
		 */
		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
	}

	free(buffer);

	return (0);
}
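
Once the stream has been parsed into the list, a caller can look up individual types with memstat_mtl_find(), just as the loop above does internally when updating existing entries. A short sketch follows; "devbuf" is only an illustrative malloc(9) type name.

#include <sys/types.h>

#include <err.h>
#include <memstat.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct memory_type_list *mtlp;
	struct memory_type *mtp;

	mtlp = memstat_mtl_alloc();
	if (mtlp == NULL)
		err(1, "memstat_mtl_alloc");
	if (memstat_sysctl_malloc(mtlp, 0) < 0)
		errx(1, "memstat_sysctl_malloc: %s",
		    memstat_strerror(memstat_mtl_geterror(mtlp)));

	/* "devbuf" is an example type name; any malloc(9) type works. */
	mtp = memstat_mtl_find(mtlp, ALLOCATOR_MALLOC, "devbuf");
	if (mtp != NULL)
		printf("devbuf: %ju allocs, %ju frees, %ju bytes in use\n",
		    (uintmax_t)memstat_get_numallocs(mtp),
		    (uintmax_t)memstat_get_numfrees(mtp),
		    (uintmax_t)memstat_get_bytes(mtp));

	memstat_mtl_free(mtlp);
	return (0);
}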
Example 4
/*
 * Print mbuf statistics.
 */
void
mbpr(void *kvmd, u_long mbaddr)
{
	struct memory_type_list *mtlp;
	struct memory_type *mtp;
	uintmax_t mbuf_count, mbuf_bytes, mbuf_free, mbuf_failures, mbuf_size;
	uintmax_t cluster_count, cluster_bytes, cluster_limit, cluster_free;
	uintmax_t cluster_failures, cluster_size;
	uintmax_t packet_count, packet_bytes, packet_free, packet_failures;
	uintmax_t tag_count, tag_bytes;
	uintmax_t jumbop_count, jumbop_bytes, jumbop_limit, jumbop_free;
	uintmax_t jumbop_failures, jumbop_size;
	uintmax_t jumbo9_count, jumbo9_bytes, jumbo9_limit, jumbo9_free;
	uintmax_t jumbo9_failures, jumbo9_size;
	uintmax_t jumbo16_count, jumbo16_bytes, jumbo16_limit, jumbo16_free;
	uintmax_t jumbo16_failures, jumbo16_size;
	uintmax_t bytes_inuse, bytes_incache, bytes_total;
	int nsfbufs, nsfbufspeak, nsfbufsused;
	struct mbstat mbstat;
	size_t mlen;
	int error;

	mtlp = memstat_mtl_alloc();
	if (mtlp == NULL) {
		warn("memstat_mtl_alloc");
		return;
	}

	/*
	 * Use memstat_*_all() because some mbuf-related memory is in
	 * uma(9), and some in malloc(9).
	 */
	if (live) {
		if (memstat_sysctl_all(mtlp, 0) < 0) {
			warnx("memstat_sysctl_all: %s",
			    memstat_strerror(memstat_mtl_geterror(mtlp)));
			goto out;
		}
	} else {
#ifndef __rtems__
		if (memstat_kvm_all(mtlp, kvmd) < 0) {
			error = memstat_mtl_geterror(mtlp);
			if (error == MEMSTAT_ERROR_KVM)
				warnx("memstat_kvm_all: %s",
				    kvm_geterr(kvmd));
			else
				warnx("memstat_kvm_all: %s",
				    memstat_strerror(error));
			goto out;
		}
#else
		;
#endif
	}

	mtp = memstat_mtl_find(mtlp, ALLOCATOR_UMA, MBUF_MEM_NAME);
	if (mtp == NULL) {
		warnx("memstat_mtl_find: zone %s not found", MBUF_MEM_NAME);
		goto out;
	}
	mbuf_count = memstat_get_count(mtp);
	mbuf_bytes = memstat_get_bytes(mtp);
	mbuf_free = memstat_get_free(mtp);
	mbuf_failures = memstat_get_failures(mtp);
	mbuf_size = memstat_get_size(mtp);

	mtp = memstat_mtl_find(mtlp, ALLOCATOR_UMA, MBUF_PACKET_MEM_NAME);
	if (mtp == NULL) {
		warnx("memstat_mtl_find: zone %s not found",
		    MBUF_PACKET_MEM_NAME);
		goto out;
	}
	packet_count = memstat_get_count(mtp);
	packet_bytes = memstat_get_bytes(mtp);
	packet_free = memstat_get_free(mtp);
	packet_failures = memstat_get_failures(mtp);

	mtp = memstat_mtl_find(mtlp, ALLOCATOR_UMA, MBUF_CLUSTER_MEM_NAME);
	if (mtp == NULL) {
		warnx("memstat_mtl_find: zone %s not found",
		    MBUF_CLUSTER_MEM_NAME);
		goto out;
	}
	cluster_count = memstat_get_count(mtp);
	cluster_bytes = memstat_get_bytes(mtp);
	cluster_limit = memstat_get_countlimit(mtp);
	cluster_free = memstat_get_free(mtp);
	cluster_failures = memstat_get_failures(mtp);
	cluster_size = memstat_get_size(mtp);

	mtp = memstat_mtl_find(mtlp, ALLOCATOR_MALLOC, MBUF_TAG_MEM_NAME);
	if (mtp == NULL) {
		warnx("memstat_mtl_find: malloc type %s not found",
		    MBUF_TAG_MEM_NAME);
		goto out;
	}
	tag_count = memstat_get_count(mtp);
	tag_bytes = memstat_get_bytes(mtp);

	mtp = memstat_mtl_find(mtlp, ALLOCATOR_UMA, MBUF_JUMBOP_MEM_NAME);
	if (mtp == NULL) {
		warnx("memstat_mtl_find: zone %s not found",
		    MBUF_JUMBOP_MEM_NAME);
		goto out;
	}
	jumbop_count = memstat_get_count(mtp);
	jumbop_bytes = memstat_get_bytes(mtp);
	jumbop_limit = memstat_get_countlimit(mtp);
	jumbop_free = memstat_get_free(mtp);
	jumbop_failures = memstat_get_failures(mtp);
	jumbop_size = memstat_get_size(mtp);

	mtp = memstat_mtl_find(mtlp, ALLOCATOR_UMA, MBUF_JUMBO9_MEM_NAME);
	if (mtp == NULL) {
		warnx("memstat_mtl_find: zone %s not found",
		    MBUF_JUMBO9_MEM_NAME);
		goto out;
	}
	jumbo9_count = memstat_get_count(mtp);
	jumbo9_bytes = memstat_get_bytes(mtp);
	jumbo9_limit = memstat_get_countlimit(mtp);
	jumbo9_free = memstat_get_free(mtp);
	jumbo9_failures = memstat_get_failures(mtp);
	jumbo9_size = memstat_get_size(mtp);

	mtp = memstat_mtl_find(mtlp, ALLOCATOR_UMA, MBUF_JUMBO16_MEM_NAME);
	if (mtp == NULL) {
		warnx("memstat_mtl_find: zone %s not found",
		    MBUF_JUMBO16_MEM_NAME);
		goto out;
	}
	jumbo16_count = memstat_get_count(mtp);
	jumbo16_bytes = memstat_get_bytes(mtp);
	jumbo16_limit = memstat_get_countlimit(mtp);
	jumbo16_free = memstat_get_free(mtp);
	jumbo16_failures = memstat_get_failures(mtp);
	jumbo16_size = memstat_get_size(mtp);

	printf("%ju/%ju/%ju mbufs in use (current/cache/total)\n",
	    mbuf_count + packet_count, mbuf_free + packet_free,
	    mbuf_count + packet_count + mbuf_free + packet_free);

	printf("%ju/%ju/%ju/%ju mbuf clusters in use "
	    "(current/cache/total/max)\n",
	    cluster_count - packet_free, cluster_free + packet_free,
	    cluster_count + cluster_free, cluster_limit);

	printf("%ju/%ju mbuf+clusters out of packet secondary zone in use "
	    "(current/cache)\n",
	    packet_count, packet_free);

	printf("%ju/%ju/%ju/%ju %juk (page size) jumbo clusters in use "
	    "(current/cache/total/max)\n",
	    jumbop_count, jumbop_free, jumbop_count + jumbop_free,
	    jumbop_limit, jumbop_size / 1024);

	printf("%ju/%ju/%ju/%ju 9k jumbo clusters in use "
	    "(current/cache/total/max)\n",
	    jumbo9_count, jumbo9_free, jumbo9_count + jumbo9_free,
	    jumbo9_limit);

	printf("%ju/%ju/%ju/%ju 16k jumbo clusters in use "
	    "(current/cache/total/max)\n",
	    jumbo16_count, jumbo16_free, jumbo16_count + jumbo16_free,
	    jumbo16_limit);

#if 0
	printf("%ju mbuf tags in use\n", tag_count);
#endif

	/*-
	 * Calculate in-use bytes as:
	 * - straight mbuf memory
	 * - mbuf memory in packets
	 * - the clusters attached to packets
	 * - and the rest of the non-packet-attached clusters.
	 * - m_tag memory
	 * This avoids counting the clusters attached to packets in the cache.
	 * This currently excludes sf_buf space.
	 */
	bytes_inuse =
	    mbuf_bytes +			/* straight mbuf memory */
	    packet_bytes +			/* mbufs in packets */
	    (packet_count * cluster_size) +	/* clusters in packets */
	    /* other clusters */
	    ((cluster_count - packet_count - packet_free) * cluster_size) +
	    tag_bytes +
	    (jumbop_count * jumbop_size) +	/* jumbo clusters */
	    (jumbo9_count * jumbo9_size) +
	    (jumbo16_count * jumbo16_size);

	/*
	 * Calculate in-cache bytes as:
	 * - cached straight mbufs
	 * - cached packet mbufs
	 * - cached packet clusters
	 * - cached straight clusters
	 * This currently excludes sf_buf space.
	 */
	bytes_incache =
	    (mbuf_free * mbuf_size) +		/* straight free mbufs */
	    (packet_free * mbuf_size) +		/* mbufs in free packets */
	    (packet_free * cluster_size) +	/* clusters in free packets */
	    (cluster_free * cluster_size) +	/* free clusters */
	    (jumbop_free * jumbop_size) +	/* jumbo clusters */
	    (jumbo9_free * jumbo9_size) +
	    (jumbo16_free * jumbo16_size);

	/*
	 * Total is bytes in use + bytes in cache.  This doesn't take into
	 * account various other misc data structures, overhead, etc, but
	 * gives the user something useful despite that.
	 */
	bytes_total = bytes_inuse + bytes_incache;

	printf("%juK/%juK/%juK bytes allocated to network "
	    "(current/cache/total)\n", bytes_inuse / 1024,
	    bytes_incache / 1024, bytes_total / 1024);

	printf("%ju/%ju/%ju requests for mbufs denied (mbufs/clusters/"
	    "mbuf+clusters)\n", mbuf_failures, cluster_failures,
	    packet_failures);

	printf("%ju/%ju/%ju requests for jumbo clusters denied "
	    "(%juk/9k/16k)\n", jumbop_failures, jumbo9_failures,
	    jumbo16_failures, jumbop_size / 1024);

	if (live) {
		mlen = sizeof(nsfbufs);
		if (!sysctlbyname("kern.ipc.nsfbufs", &nsfbufs, &mlen, NULL,
		    0) &&
		    !sysctlbyname("kern.ipc.nsfbufsused", &nsfbufsused,
		    &mlen, NULL, 0) &&
		    !sysctlbyname("kern.ipc.nsfbufspeak", &nsfbufspeak,
		    &mlen, NULL, 0))
			printf("%d/%d/%d sfbufs in use (current/peak/max)\n",
			    nsfbufsused, nsfbufspeak, nsfbufs);
		mlen = sizeof(mbstat);
		if (sysctlbyname("kern.ipc.mbstat", &mbstat, &mlen, NULL, 0)) {
			warn("kern.ipc.mbstat");
			goto out;
		}
	} else {
		if (kread(mbaddr, (char *)&mbstat, sizeof mbstat) != 0)
			goto out;
	}
	printf("%lu requests for sfbufs denied\n", mbstat.sf_allocfail);
	printf("%lu requests for sfbufs delayed\n", mbstat.sf_allocwait);
	printf("%lu requests for I/O initiated by sendfile\n",
	    mbstat.sf_iocnt);
	printf("%lu calls to protocol drain routines\n", mbstat.m_drain);
out:
	memstat_mtl_free(mtlp);
}
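
mbpr() depends on netstat's file-scope context (the live flag and the kread() helper), so it does not compile in isolation. A hedged harness for exercising the live-sysctl path might look like the following; the stand-ins are assumptions, not netstat's real plumbing.

#include <sys/types.h>

/* Stand-ins for netstat's file-scope context -- assumptions only. */
static int live = 1;			/* true: query sysctl, not kvm */

static int
kread(u_long addr, char *buf, size_t size)
{
	/* Stub; never reached while live is nonzero. */
	return (-1);
}

void mbpr(void *, u_long);

int
main(void)
{
	mbpr(NULL, 0);		/* in live mode both arguments are unused */
	return (0);
}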