Example #1
/*
 * Attempt to merge two adjacent blocks (mh below mhnext).
 */
static
void
__malloc_trymerge(struct mheader *mh, struct mheader *mhnext)
{
	struct mheader *mhnextnext;

	if (mh->mh_nextblock != mhnext->mh_prevblock) {
		errx(1, "free: Heap corrupt (%p and %p inconsistent)",
		     mh, mhnext);
	}
	if (mh->mh_inuse || mhnext->mh_inuse) {
		/* can't merge */
		return;
	}

	mhnextnext = M_NEXT(mhnext);

	mh->mh_nextblock = M_MKFIELD(MBLOCKSIZE + M_SIZE(mh) +
				     MBLOCKSIZE + M_SIZE(mhnext));

	if (mhnextnext != (struct mheader *)__heaptop) {
		mhnextnext->mh_prevblock = mh->mh_nextblock;
	}

	/* Deadbeef out the memory used by the now-obsolete header */
	__malloc_deadbeef(mhnext, sizeof(struct mheader));
}
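The allocator examples here and in Examples #3, #5, #6, and #11 lean on a block header and a family of macros whose definitions are not shown. The following is a minimal sketch of what the code assumes: an 8-byte block granularity, a header exactly one block wide, and next/previous sizes stored in block units. The names match the code, but the field widths, shift, and magic value are illustrative guesses, not the allocator's actual private header.

/*
 * Sketch only: plausible definitions consistent with how the
 * examples use them, not the original header file.
 */
#define MBLOCKSHIFT	3			/* assumed: log2(MBLOCKSIZE) */
#define MBLOCKSIZE	(1 << MBLOCKSHIFT)	/* assumed: 8-byte blocks */
#define MMAGIC		2			/* assumed magic bit pattern */

extern uintptr_t __heapbase, __heaptop;		/* set up by __malloc_init() */

struct mheader {
	unsigned mh_prevblock:29;	/* size of previous block, in blocks */
	unsigned mh_pad:1;
	unsigned mh_magic1:2;
	unsigned mh_nextblock:29;	/* size of this block, in blocks */
	unsigned mh_inuse:1;
	unsigned mh_magic2:2;
};

#define M_MKFIELD(bytes)	((bytes) >> MBLOCKSHIFT)
#define M_NEXTOFF(mh)	((size_t)(mh)->mh_nextblock << MBLOCKSHIFT)
#define M_PREVOFF(mh)	((size_t)(mh)->mh_prevblock << MBLOCKSHIFT)
#define M_NEXT(mh)	((struct mheader *)((char *)(mh) + M_NEXTOFF(mh)))
#define M_PREV(mh)	((struct mheader *)((char *)(mh) - M_PREVOFF(mh)))
#define M_DATA(mh)	((void *)((mh) + 1))
#define M_SIZE(mh)	(M_NEXTOFF(mh) - MBLOCKSIZE)
#define M_OK(mh)	((mh)->mh_magic1 == MMAGIC && (mh)->mh_magic2 == MMAGIC)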
Example #2
/**
 * Free memory. Merely a wrapper around free() for the case where we
 * want to keep track of allocations.
 *
 * @param ptr the pointer to free
 * @param filename where in the code was the call to GNUNET_free
 * @param linenumber where in the code was the call to GNUNET_free
 */
void
GNUNET_xfree_ (void *ptr,
	       const char *filename,
	       int linenumber)
{
  GNUNET_assert_at (NULL != ptr,
		    filename,
		    linenumber);
#ifdef W32_MEM_LIMIT
  ptr = &((size_t *) ptr)[-1];
  mem_used -= *((size_t *) ptr);
#endif
#if defined(M_SIZE)
#if ENABLE_POISONING
  {
    const uint64_t baadfood = GNUNET_ntohll (0xBAADF00DBAADF00DLL);
    uint64_t *base = ptr;
    size_t s = M_SIZE (ptr);
    size_t i;

    for (i=0;i<s/8;i++)
      base[i] = baadfood;
    GNUNET_memcpy (&base[s/8], &baadfood, s % 8);
  }
#endif
#endif
  free (ptr);
}
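Callers do not normally invoke GNUNET_xfree_ directly; a wrapper macro captures the call site for them. A minimal sketch of that wrapper (the real definition lives in GNUnet's public headers):

/* Sketch: filename and line number are filled in automatically. */
#define GNUNET_free(ptr) GNUNET_xfree_ (ptr, __FILE__, __LINE__)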
Example #3
/*
 * The actual free() implementation.
 */
void
free(void *x)
{
	struct mheader *mh, *mhnext, *mhprev;

	if (x==NULL) {
		/* safest practice */
		return;
	}

	/* Consistency check. */
	if (__heapbase==0 || __heaptop==0 || __heapbase > __heaptop) {
		warnx("free: Internal error - local data corrupt");
		errx(1, "free: heapbase 0x%lx; heaptop 0x%lx", 
		     (unsigned long) __heapbase, (unsigned long) __heaptop);
	}

	/* Don't allow freeing pointers that aren't on the heap. */
	if ((uintptr_t)x < __heapbase || (uintptr_t)x >= __heaptop) {
		errx(1, "free: Invalid pointer %p freed (out of range)", x);
	}

#ifdef MALLOCDEBUG
	warnx("free: about to free %p", x);
	__malloc_dump();
#endif

	mh = ((struct mheader *)x)-1;
	if (!M_OK(mh)) {
		errx(1, "free: Invalid pointer %p freed (corrupt header)", x);
	}

	if (!mh->mh_inuse) {
		errx(1, "free: Invalid pointer %p freed (already free)", x);
	}

	/* mark it free */
	mh->mh_inuse = 0;

	/* wipe it */
	__malloc_deadbeef(M_DATA(mh), M_SIZE(mh));

	/* Try merging with the block above (but not if we're at the top) */
	mhnext = M_NEXT(mh);
	if (mhnext != (struct mheader *)__heaptop) {
		__malloc_trymerge(mh, mhnext);
	}

	/* Try merging with the block below (but not if we're at the bottom) */
	if (mh != (struct mheader *)__heapbase) {
		mhprev = M_PREV(mh);
		__malloc_trymerge(mhprev, mh);
	}

#ifdef MALLOCDEBUG
	warnx("free: freed %p", x);
	__malloc_dump();
#endif
}
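A tiny hypothetical harness illustrating the checks above; each commented-out call would terminate the process through errx() rather than silently corrupting the heap:

#include <stdlib.h>

int
main(void)
{
	char *p = malloc(16);

	free(p);
	/* free(p);      errx: "Invalid pointer ... (already free)"   */
	/* free(p + 1);  errx: "Invalid pointer ... (corrupt header)" */
	/* free(&p);     errx: "Invalid pointer ... (out of range)"   */
	return 0;
}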
Example #4
/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Book keeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}
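A hypothetical caller of m_getm2(), shown as a fragment: it builds a fresh 9000-byte chain with a packet header in a context that cannot sleep. The helper name is invented for illustration.

/* Hypothetical helper: allocate a 9000-byte chain with a packet
 * header on the first mbuf; m_getm2() returns NULL on failure. */
static int
example_alloc_chain(struct mbuf **out)
{
	struct mbuf *m;

	m = m_getm2(NULL, 9000, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	*out = m;
	return (0);
}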
Example #5
/*
 * Make a new (free) block from the block passed in, leaving size
 * bytes for data in the current block. size must be a multiple of
 * MBLOCKSIZE.
 *
 * Only split if the excess space is at least twice the blocksize -
 * one blocksize to hold a header and one for data.
 */
static
void
__malloc_split(struct mheader *mh, size_t size)
{
	struct mheader *mhnext, *mhnew;
	size_t oldsize;

	if (size % MBLOCKSIZE != 0) {
		errx(1, "malloc: Internal error (size %lu passed to split)",
		     (unsigned long) size);
	}

	if (M_SIZE(mh) - size < 2*MBLOCKSIZE) {
		/* no room */
		return;
	}

	mhnext = M_NEXT(mh);

	oldsize = M_SIZE(mh);
	mh->mh_nextblock = M_MKFIELD(size + MBLOCKSIZE);
	
	mhnew = M_NEXT(mh);
	if (mhnew==mhnext) {
		errx(1, "malloc: Internal error (split screwed up?)");
	}

	mhnew->mh_prevblock = M_MKFIELD(size + MBLOCKSIZE);
	mhnew->mh_pad = 0;
	mhnew->mh_magic1 = MMAGIC;
	mhnew->mh_nextblock = M_MKFIELD(oldsize - size);
	mhnew->mh_inuse = 0;
	mhnew->mh_magic2 = MMAGIC;

	if (mhnext != (struct mheader *) __heaptop) {
		mhnext->mh_prevblock = mhnew->mh_nextblock;
	}
}
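For concreteness, assume MBLOCKSIZE is 8 and mh heads a free block with M_SIZE(mh) == 64 (sizes invented for illustration). A request for size == 16 leaves an excess of 48 bytes, which clears the 2*MBLOCKSIZE threshold, so the split proceeds: mh's next-block field is rewritten to describe a 24-byte block (16 of data plus an 8-byte header), and the new header 24 bytes up describes the remaining 48 bytes (8 of header plus 40 of data). The totals still balance: 24 + 48 equals the original 8 + 64.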
Example #6
/*
 * Debugging print function to iterate and dump the entire heap.
 */
static
void
__malloc_dump(void)
{
	struct mheader *mh;
	uintptr_t i;
	size_t rightprevblock;

	warnx("heap: ************************************************");

	rightprevblock = 0;
	for (i=__heapbase; i<__heaptop; i += M_NEXTOFF(mh)) {
		mh = (struct mheader *) i;
		if (!M_OK(mh)) {
			errx(1, "malloc: Heap corrupt; header at 0x%lx"
			     " has bad magic bits",
			     (unsigned long) i);
		}
		if (mh->mh_prevblock != rightprevblock) {
			errx(1, "malloc: Heap corrupt; header at 0x%lx"
			     " has bad previous-block size %lu "
			     "(should be %lu)",
			     (unsigned long) i, 
			     (unsigned long) mh->mh_prevblock << MBLOCKSHIFT,
			     (unsigned long) rightprevblock << MBLOCKSHIFT);
		}
		rightprevblock = mh->mh_nextblock;

		warnx("heap: 0x%lx 0x%-6lx (next: 0x%lx) %s",
		      (unsigned long) i + MBLOCKSIZE,
		      (unsigned long) M_SIZE(mh),
		      (unsigned long) (i+M_NEXTOFF(mh)),
		      mh->mh_inuse ? "INUSE" : "FREE");
	}
	if (i!=__heaptop) {
		errx(1, "malloc: Heap corrupt; ran off end");
	}

	warnx("heap: ************************************************");
}
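Given the format strings above, a heap holding one 16-byte allocation followed by a 40-byte free block would dump along these lines (addresses invented for illustration):

heap: ************************************************
heap: 0x1008 0x10     (next: 0x1018) INUSE
heap: 0x1020 0x28     (next: 0x1048) FREE
heap: ************************************************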
Example #7
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < M_SIZE(m))
		M_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
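m_prepend() is the slow path behind the M_PREPEND() macro, which first tries to grow the mbuf in place. A simplified sketch of that macro (the real one, in sys/mbuf.h, goes through temporaries and extra checks):

/* Simplified sketch of M_PREPEND: reuse leading space when there is
 * enough, otherwise fall back to m_prepend(), then fix up the packet
 * header length. */
#define M_PREPEND(m, plen, how) do {					\
	if (M_LEADINGSPACE(m) >= (plen)) {				\
		(m)->m_data -= (plen);					\
		(m)->m_len += (plen);					\
	} else								\
		(m) = m_prepend((m), (plen), (how));			\
	if ((m) != NULL && ((m)->m_flags & M_PKTHDR))			\
		(m)->m_pkthdr.len += (plen);				\
} while (0)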
Example #8
/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 0 (or panics) when bad and 1 when all tests pass.
 * sanitize: 0 to run M_SANITY_ACTION on failure, 1 to garble things so
 * they blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = M_START(m);
		b = a + M_SIZE(m);
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exeeds mbuf space");

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADC0DE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}
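As the header comment says, this is intended for use in KASSERT(); a hypothetical call site:

/* Hypothetical debug check: under INVARIANTS this panics on a bad
 * chain; sanitize == 0 means the chain is not modified. */
KASSERT(m_sanity(m0, 0), ("%s: corrupt mbuf chain", __func__));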
Example #9
void
tcp_pcap_add(struct tcphdr *th, struct mbuf *m, struct mbufq *queue)
{
	struct mbuf *n = NULL, *mhead;

	KASSERT(th, ("%s: called with th == NULL", __func__));
	KASSERT(m, ("%s: called with m == NULL", __func__));
	KASSERT(queue, ("%s: called with queue == NULL", __func__));

	/* We only care about data packets. */
	while (m && m->m_type != MT_DATA)
		m = m->m_next;

	/* We only need to do something if we still have an mbuf. */
	if (!m)
		return;

	/* If we are not saving mbufs, return now. */
	if (queue->mq_maxlen == 0)
		return;

	/*
	 * Check to see if we will need to recycle mbufs.
	 *
	 * If we need to get rid of mbufs to stay below
	 * our packet count, try to reuse the mbuf. Once
	 * we already have a new mbuf (n), then we can
	 * simply free subsequent mbufs.
	 *
	 * Note that most of the logic in here is to deal
	 * with the reuse. If we are fine with constant
	 * mbuf allocs/deallocs, we could ditch this logic.
	 * But, it only seems to make sense to reuse
	 * mbufs we already have.
	 */
	while (mbufq_full(queue)) {
		mhead = mbufq_dequeue(queue);

		if (n) {
			tcp_pcap_m_freem(mhead);
		}
		else {
			/*
			 * If this held an external cluster, try to
			 * detach the cluster. But, if we held the
			 * last reference, go through the normal
			 * free-ing process.
			 */
			if (mhead->m_flags & M_EXT) {
				switch (mhead->m_ext.ext_type) {
				case EXT_SFBUF:
					/* Don't mess around with these. */
					tcp_pcap_m_freem(mhead);
					continue;
				default:
					if (atomic_fetchadd_int(
						mhead->m_ext.ext_cnt, -1) == 1)
					{
						/*
						 * We held the last reference
						 * on this cluster. Restore
						 * the reference count and put
						 * it back in the pool.
						 */
						*(mhead->m_ext.ext_cnt) = 1;
						tcp_pcap_m_freem(mhead);
						continue;
					}
					/*
					 * We were able to cleanly free the
					 * reference.
					 */
					atomic_subtract_int(
					    &tcp_pcap_clusters_referenced_cur,
					    1);
					tcp_pcap_alloc_reuse_ext++;
					break;
				}
			}
			else {
				tcp_pcap_alloc_reuse_mbuf++;
			}

			n = mhead;
			tcp_pcap_m_freem(n->m_next);
			m_init(n, NULL, 0, M_NOWAIT, MT_DATA, 0);
		}
	}

	/* Check to see if we need to get a new mbuf. */
	if (!n) {
		if (!(n = m_get(M_NOWAIT, MT_DATA)))
			return;
		tcp_pcap_alloc_new_mbuf++;
	}

	/*
	 * What are we dealing with? If a cluster, attach it. Otherwise,
	 * try to copy the data from the beginning of the mbuf to the
	 * end of data. (There may be data between the start of the data
	 * area and the current data pointer. We want to get this, because
	 * it may contain header information that is useful.)
	 * In cases where that isn't possible, settle for what we can
	 * get.
	 */
	if ((m->m_flags & M_EXT) && tcp_pcap_take_cluster_reference()) {
		n->m_data = m->m_data;
		n->m_len = m->m_len;
		mb_dupcl(n, m);
	}
	else if (((m->m_data + m->m_len) - M_START(m)) <= M_SIZE(n)) {
		/*
		 * At this point, n is guaranteed to be a normal mbuf
		 * with no cluster and no packet header. Because the
		 * logic in this code block requires this, the assert
		 * is here to catch any instances where someone
		 * changes the logic to invalidate that assumption.
		 */
		KASSERT((n->m_flags & (M_EXT | M_PKTHDR)) == 0,
			("%s: Unexpected flags (%#x) for mbuf",
			__func__, n->m_flags));
		n->m_data = n->m_dat + M_LEADINGSPACE_NOWRITE(m);
		n->m_len = m->m_len;
		bcopy(M_START(m), n->m_dat,
			m->m_len + M_LEADINGSPACE_NOWRITE(m));
	}
	else {
		/*
		 * This is the case where we need to "settle for what
		 * we can get". The most probable way to this code
		 * path is that we've already taken references to the
		 * maximum number of mbuf clusters we can, and the data
		 * is too long to fit in an mbuf's internal storage.
		 * Try for a "best fit".
		 */
		tcp_pcap_copy_bestfit(th, m, n);

		/* Don't try to get additional data. */
		goto add_to_queue;
	}

	if (m->m_next) {
		n->m_next = m_copym(m->m_next, 0, M_COPYALL, M_NOWAIT);
		tcp_pcap_adj_cluster_reference(n->m_next, 1);
	}

add_to_queue:
	/* Add the new mbuf to the list. */
	if (mbufq_enqueue(queue, n)) {
		/* This shouldn't happen. If INVARIANTS is defined, panic. */
		KASSERT(0, ("%s: mbufq was unexpectedly full!", __func__));
		tcp_pcap_m_freem(n);
	}
}
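In the FreeBSD TCP input and output paths this runs once per segment, roughly as follows (a sketch; the per-connection capture queues live in struct tcpcb):

#ifdef TCPPCAP
	/* Sketch of a call site: capture the received segment into the
	 * per-connection queue, if capturing is enabled. */
	tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif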
Example #10
/*
 * Copy data from m to n, where n cannot fit all the data we might
 * want from m.
 *
 * Prioritize data like this:
 * 1. TCP header
 * 2. IP header
 * 3. Data
 */
static void
tcp_pcap_copy_bestfit(struct tcphdr *th, struct mbuf *m, struct mbuf *n)
{
	struct mbuf *m_cur = m;
	int bytes_to_copy=0, trailing_data, skip=0, tcp_off;

	/* Below, we assume these will be non-NULL. */
	KASSERT(th, ("%s: called with th == NULL", __func__));
	KASSERT(m, ("%s: called with m == NULL", __func__));
	KASSERT(n, ("%s: called with n == NULL", __func__));

	/* We assume this initialization occurred elsewhere. */
	KASSERT(n->m_len == 0, ("%s: called with n->m_len=%d (expected 0)",
		__func__, n->m_len));
	KASSERT(n->m_data == M_START(n),
		("%s: called with n->m_data != M_START(n)", __func__));

	/*
	 * Calculate the size of the TCP header. We use this often
	 * enough that it is worth just calculating at the start.
	 */
	tcp_off = th->th_off << 2;

	/* Trim off leading empty mbufs. */
	while (m && m->m_len == 0)
		m = m->m_next;

	if (m) {
		m_cur = m;
	}
	else {
		/*
		 * No data? Highly unusual. We would expect to at
		 * least see a TCP header in the mbuf.
		 * As we have a pointer to the TCP header, I guess
		 * we should just copy that. (???)
		 */
fallback:
		bytes_to_copy = tcp_off;
		if (bytes_to_copy > M_SIZE(n))
			bytes_to_copy = M_SIZE(n);
		bcopy(th, n->m_data, bytes_to_copy);
		n->m_len = bytes_to_copy;
		return;
	}

	/*
	 * Find TCP header. Record the total number of bytes up to,
	 * and including, the TCP header.
	 */
	while (m_cur) {
		if ((caddr_t) th >= (caddr_t) m_cur->m_data &&
			(caddr_t) th < (caddr_t) (m_cur->m_data + m_cur->m_len))
			break;
		bytes_to_copy += m_cur->m_len;
		m_cur = m_cur->m_next;
	}
	if (m_cur)
		bytes_to_copy += (caddr_t) th - (caddr_t) m_cur->m_data;
	else
		goto fallback;
	bytes_to_copy += tcp_off;

	/*
	 * If we already want to copy more bytes than we can hold
	 * in the destination mbuf, skip leading bytes and copy
	 * what we can.
	 *
	 * Otherwise, consider trailing data.
	 */
	if (bytes_to_copy > M_SIZE(n)) {
		skip  = bytes_to_copy - M_SIZE(n);
		bytes_to_copy = M_SIZE(n);
	}
	else {
		/*
		 * Determine how much trailing data is in the chain.
		 * We start with the length of this mbuf (the one
		 * containing th) and subtract the size of the TCP
		 * header (tcp_off) and the size of the data prior
		 * to th (th - m_cur->m_data).
		 *
		 * This *should not* be negative, as the TCP code
		 * should put the whole TCP header in a single
		 * mbuf. But, it isn't a problem if it is. We will
		 * simply work off our negative balance as we look
		 * at subsequent mbufs.
		 */
		trailing_data = m_cur->m_len - tcp_off;
		trailing_data -= (caddr_t) th - (caddr_t) m_cur->m_data;
		m_cur = m_cur->m_next;
		while (m_cur) {
			trailing_data += m_cur->m_len;
			m_cur = m_cur->m_next;
		}
		if ((bytes_to_copy + trailing_data) > M_SIZE(n))
			bytes_to_copy = M_SIZE(n);
		else
			bytes_to_copy += trailing_data;
	}

	m_copydata(m, skip, bytes_to_copy, n->m_data);
	n->m_len = bytes_to_copy;
}
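To make the skip arithmetic concrete (numbers invented for illustration): if the bytes up to and including the TCP header total 300 and M_SIZE(n) is 200, skip becomes 100 and the copy takes the last 200 of those bytes, sacrificing the outermost leading bytes to keep the complete TCP header, per the priority order above. If instead the headers total 60 and 500 bytes of data trail them, bytes_to_copy is capped at 200, yielding the 60 header bytes plus the first 140 bytes of data.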
Example #11
/*
 * malloc itself.
 */
void *
malloc(size_t size)
{
	struct mheader *mh;
	uintptr_t i;
	size_t rightprevblock;

	if (__heapbase==0) {
		__malloc_init();
	}
	if (__heapbase==0 || __heaptop==0 || __heapbase > __heaptop) {
		warnx("malloc: Internal error - local data corrupt");
		errx(1, "malloc: heapbase 0x%lx; heaptop 0x%lx", 
		     (unsigned long) __heapbase, (unsigned long) __heaptop);
	}

#ifdef MALLOCDEBUG
	warnx("malloc: about to allocate %lu (0x%lx) bytes", 
	      (unsigned long) size, (unsigned long) size);
	__malloc_dump();
#endif

	/* Round size up to an integral number of blocks. */
	size = ((size + MBLOCKSIZE - 1) & ~(size_t)(MBLOCKSIZE-1));

	/*
	 * First-fit search algorithm for available blocks.
	 * Check to make sure the next/previous sizes all agree.
	 */
	rightprevblock = 0;
	for (i=__heapbase; i<__heaptop; i += M_NEXTOFF(mh)) {
		mh = (struct mheader *) i;
		if (!M_OK(mh)) {
			errx(1, "malloc: Heap corrupt; header at 0x%lx"
			     " has bad magic bits",
			     (unsigned long) i);
		}
		if (mh->mh_prevblock != rightprevblock) {
			errx(1, "malloc: Heap corrupt; header at 0x%lx"
			     " has bad previous-block size %lu "
			     "(should be %lu)",
			     (unsigned long) i, 
			     (unsigned long) mh->mh_prevblock << MBLOCKSHIFT,
			     (unsigned long) rightprevblock << MBLOCKSHIFT);
		}
		rightprevblock = mh->mh_nextblock;

		/* Can't allocate a block that's in use. */
		if (mh->mh_inuse) {
			continue;
		}

		/* Can't allocate a block that isn't big enough. */
		if (M_SIZE(mh) < size) {
			continue;
		}

		/* Try splitting block. */
		__malloc_split(mh, size);

		/*
		 * Now, allocate.
		 */
		mh->mh_inuse = 1;

#ifdef MALLOCDEBUG
		warnx("malloc: allocating at %p", M_DATA(mh));
		__malloc_dump();
#endif
		return M_DATA(mh);
	}
	if (i!=__heaptop) {
		errx(1, "malloc: Heap corrupt; ran off end");
	}

	/*
	 * Didn't find anything. Expand the heap.
	 */

	mh = __malloc_sbrk(size + MBLOCKSIZE);
	if (mh == NULL) {
		return NULL;
	}

	mh->mh_prevblock = rightprevblock;
	mh->mh_magic1 = MMAGIC;
	mh->mh_magic2 = MMAGIC;
	mh->mh_pad = 0;
	mh->mh_inuse = 1;
	mh->mh_nextblock = M_MKFIELD(size + MBLOCKSIZE);

#ifdef MALLOCDEBUG
	warnx("malloc: allocating at %p", M_DATA(mh));
	__malloc_dump();
#endif
	return M_DATA(mh);
}
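A quick round trip through this allocator, with sizes chosen to show the rounding (assuming MBLOCKSIZE == 8, as in the sketch at the end of Example #1):

#include <stdlib.h>

int
main(void)
{
	void *a = malloc(10);	/* rounds up to 16 */
	void *b = malloc(100);	/* rounds up to 104 */

	free(a);		/* wiped, then merged with free neighbors */
	free(b);		/* coalesces the tail back into one block */
	return 0;
}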