Example #1
/**
 * Add item as right-sibling of node (cannot be the root node).
 */
void
etree_add_right_sibling(etree_t *tree, void *node, void *item)
{
	node_t *n, *i;

	etree_check(tree);
	g_assert(node != NULL);
	g_assert(item != NULL);
	g_assert(etree_is_orphan(tree, item));
	g_assert(!etree_is_root(tree, node));

	n = ptr_add_offset(node, tree->offset);
	i = ptr_add_offset(item, tree->offset);

	i->parent = n->parent;
	i->sibling = n->sibling;
	n->sibling = i;

	if (NULL == i->sibling && etree_is_extended(tree)) {
		nodex_t *px = (nodex_t *) n->parent;
		px->last_child = i;
	}

	tree->count = 0;		/* Tree count is now unknown */
}
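All of these examples rely on the same embedded-node idiom: the container item carries a node_t (or nodex_t) field, and ptr_add_offset() converts between the item pointer and its embedded node by adding or subtracting tree->offset. Below is a minimal sketch of that conversion; struct item and item_offset_demo() are hypothetical names, and the offset is assumed to be the one the tree descriptor was initialized with.

struct item {
	int value;			/* User payload */
	node_t node;		/* Embedded tree linkage */
};

static void
item_offset_demo(void)
{
	struct item it;
	size_t offset = offsetof(struct item, node);
	node_t *n;
	struct item *back;

	/* item -> embedded node: add the offset the tree was built with */
	n = ptr_add_offset(&it, offset);

	/* embedded node -> item: subtract that same offset */
	back = ptr_add_offset(n, -offset);

	g_assert(n == &it.node);
	g_assert(back == &it);
}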
Example #2
/**
 * Prepend child to parent.
 *
 * This is always a fast operation.
 */
void
etree_prepend_child(etree_t *tree, void *parent, void *child)
{
	node_t *cn, *pn;

	etree_check(tree);
	g_assert(parent != NULL);
	g_assert(child != NULL);
	g_assert(etree_is_orphan(tree, child));

	cn = ptr_add_offset(child, tree->offset);
	pn = ptr_add_offset(parent, tree->offset);

	cn->parent = pn;
	cn->sibling = pn->child;
	pn->child = cn;

	if (etree_is_extended(tree)) {
		nodex_t *px = (nodex_t *) pn;

		if (NULL == px->last_child) {
			g_assert(NULL == cn->sibling);	/* Parent did not have any child */
			px->last_child = cn;
		}
	}

	tree->count = 0;		/* Tree count is now unknown */
}
Example #3
/**
 * Add item as left-sibling of node (cannot be the root node).
 *
 * This is inefficient if the node is not the first child (the head of the
 * sibling list) given that the siblings are linked through a one-way list.
 */
void
etree_add_left_sibling(etree_t *tree, void *node, void *item)
{
	node_t *n, *i;

	etree_check(tree);
	g_assert(node != NULL);
	g_assert(item != NULL);
	g_assert(etree_is_orphan(tree, item));
	g_assert(!etree_is_root(tree, node));

	n = ptr_add_offset(node, tree->offset);
	i = ptr_add_offset(item, tree->offset);

	i->parent = n->parent;
	i->sibling = n;

	if (n == n->parent->child) {
		/* node is first child, optimized case */
		n->parent->child = i;
	} else {
		node_t *p;

		for (p = n->parent->child; p->sibling != n; p = p->sibling)
			/* empty */;

		g_assert(p != NULL);		/* previous node found */

		p->sibling = i;
	}

	tree->count = 0;		/* Tree count is now unknown */
}
Example #4
/**
 * If not already done, initiate bitmap checking by creating all the currently
 * defined bitmaps in memory, zeroed, so that we can check that all the pages
 * flagged as used are indeed referred to by either a big key or a big value.
 *
 * @return TRUE if OK.
 */
gboolean
big_check_start(DBM *db)
{
	DBMBIG *dbg = db->big;
	long i;

	if (-1 == dbg->fd && -1 == big_open(dbg))
		return FALSE;

	if (dbg->bitcheck != NULL)
		return TRUE;

	/*
	 * The array of checking bitmaps is zeroed, then bit 0 of each bitmap is
	 * set, since that bit stands for the bitmap page itself.
	 *
	 * Looping over the big keys and values and marking their blocks as used
	 * will set additional bits in these checking maps, which at the end will
	 * be compared with the bitmaps on disk.
	 */

	dbg->bitcheck = halloc0(BIG_BLKSIZE * dbg->bitmaps);

	for (i = 0; i < dbg->bitmaps; i++) {
		bit_field_t *map = ptr_add_offset(dbg->bitcheck, i * BIG_BLKSIZE);
		
		bit_field_set(map, 0);		/* Bit 0 is for the bitmap itself */
	}

	return TRUE;
}
Example #5
/**
 * Attempt to grow the output buffer.
 *
 * @param zs		the zlib stream object
 * @param maxout	maximum length of dynamically-allocated buffer (0 = none)
 *
 * @return TRUE if we can resume processing after the output buffer was
 * expanded, FALSE if the buffer cannot be expanded (not dynamically allocated
 * or reached the maximum size).
 */
static bool
zlib_stream_grow_output(zlib_stream_t *zs, size_t maxout)
{
    if (zs->allocated) {
        z_streamp z = zs->z;

        /*
         * Limit growth if asked to do so.
         *
         * This is used mostly when inflating, to avoid a small amount of
         * input data expanding into megabytes...
         */

        if (maxout != 0 && zs->outlen >= maxout) {
            g_warning("%s(): reached maximum buffer size (%zu bytes): "
                      "input=%zu, output=%zu",
                      G_STRFUNC, maxout, zs->inlen, zs->outlen);
            return FALSE;	/* Cannot continue */
        }

        zs->outlen += OUT_GROW;
        zs->out = hrealloc(zs->out, zs->outlen);
        z->next_out = ptr_add_offset(zs->out, zs->outlen - OUT_GROW);
        z->avail_out = OUT_GROW;
        return TRUE;	/* Can process remaining input */
    }

    g_warning("%s(): under-estimated output buffer size: "
              "input=%zu, output=%zu", G_STRFUNC, zs->inlen, zs->outlen);

    return FALSE;		/* Cannot continue */
}
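A sketch of how such a growth helper could be driven from an inflate loop: whenever zlib runs out of output space, the buffer is extended and processing resumes. The inflate_all() wrapper below is hypothetical; only zlib_stream_grow_output() and the zs fields come from the code above, the rest is the plain zlib API.

/*
 * Hypothetical driver loop: keep inflating while the output buffer can be
 * grown.  Only zlib_stream_grow_output() comes from the code above.
 */
static bool
inflate_all(zlib_stream_t *zs, size_t maxout)
{
	z_streamp z = zs->z;

	for (;;) {
		int ret = inflate(z, Z_SYNC_FLUSH);

		if (Z_STREAM_END == ret)
			return TRUE;			/* Whole stream inflated */

		if (ret != Z_OK && ret != Z_BUF_ERROR)
			return FALSE;			/* Hard zlib error */

		if (z->avail_out != 0)
			return FALSE;			/* Stalled for another reason */

		if (!zlib_stream_grow_output(zs, maxout))
			return FALSE;			/* Cannot grow output any further */

		/* Output buffer was extended, resume processing remaining input */
	}
}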
Example #6
/**
 * Initialize and load linked items into a list.
 *
 * This routine is meant to allow the creation of an expanded list from
 * homogeneous items that happen to be linked into another data structure
 * through a single pointer.
 *
 * It is useful to allow reuse of code that can process such lists, such
 * as xslist_sort(), xslist_shuffle(), etc...  It is naturally up to the
 * caller to then refetch the proper head pointer.
 *
 * @param list			the list into which we are loading items
 * @param head			first data item part of the linked list (NULL possible)
 * @param offset		the offset of the expanded link field within items
 * @param link_offset	the offset of the linking field in the chaining struct
 *
 * @return the amount of loaded items, as a convenience.
 */
size_t
xslist_load(xslist_t *list, void *head, size_t offset, size_t link_offset)
{
	xslink_t *lk, *next;
	size_t n;

	g_assert(list != NULL);
	g_assert(size_is_non_negative(offset));

	xslist_init(list, offset, link_offset);

	if G_UNLIKELY(NULL == head)
		return 0;

	lk = ptr_add_offset(head, offset);
	list->head = lk;

	for (n = 1; NULL != (next = xslist_next(list, lk)); n++, lk = next)
		/* empty */;

	list->tail = lk;
	list->count = n;

	safety_assert(xslist_length(list, list->head) == list->count);

	return n;
}
Example #7
/**
 * Read data from the pmsg list into supplied buffer.  Copied data is
 * removed from the list.
 *
 * @param slist		the pmsg list
 * @param buf		start of buffer where data must be copied
 * @param len		length of buffer
 *
 * @return amount of copied bytes.
 */
size_t
pmsg_slist_read(slist_t *slist, void *buf, size_t len)
{
	slist_iter_t *iter;
	size_t remain = len;
	void *p;

	g_assert(slist != NULL);

	iter = slist_iter_removable_on_head(slist);
	p = buf;

	while (remain != 0 && slist_iter_has_item(iter)) {
		pmsg_t *mb = slist_iter_current(iter);
		int n;

		n = pmsg_read(mb, p, remain);
		remain -= n;
		p = ptr_add_offset(p, n);
		if (0 == pmsg_size(mb)) {			/* Fully copied message */
			pmsg_free(mb);
			slist_iter_remove(iter);		/* Warning: moves to next */
		} else {
			break;		/* No need to continue on partial copy */
		}
	}
	slist_iter_free(&iter);

	return len - remain;
}
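A possible caller, illustrating that the routine consumes what it copies: here the next four queued bytes are drained into a stack buffer and decoded. The read_be32_from_queue() helper is a hypothetical name; pmsg_slist_read() and peek_be32() are the routines shown in these examples.

/* Hypothetical: copy the next 4 queued bytes into a host value */
static bool
read_be32_from_queue(slist_t *slist, guint32 *value)
{
	char buf[4];

	if (sizeof buf != pmsg_slist_read(slist, buf, sizeof buf))
		return FALSE;	/* Fewer than 4 bytes buffered (they were consumed) */

	*value = peek_be32(buf);
	return TRUE;
}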
Example #8
/**
 * Internal recursive matching routine.
 */
static void *
etree_find_depth_internal(const etree_t *tree, node_t *root,
	unsigned curdepth, unsigned maxdepth, match_fn_t match, void *data)
{
	node_t *n, *next;
	void *item;

	etree_check(tree);

	item = ptr_add_offset(root, -tree->offset);

	if ((*match)(item, data))
		return item;

	if (maxdepth == curdepth)
		return NULL;

	for (n = root->child; n != NULL; n = next) {
		next = n->sibling;
		item = etree_find_depth_internal(tree, n,
			curdepth + 1, maxdepth, match, data);
		if (item != NULL)
			return item;
	}

	return NULL;
}
Example #9
/**
 * Mark blocks in the supplied vector as allocated in the checking bitmap.
 *
 * @param db		the sdbm database
 * @param bvec		vector where allocated block numbers are stored
 * @param bcnt		amount of blocks in vector
 */
static void
big_file_mark_used(DBM *db, const void *bvec, int bcnt)
{
	DBMBIG *dbg = db->big;
	const void *q;
	int n;

	if (!big_check_start(db))
		return;

	for (q = bvec, n = bcnt; n > 0; n--) {
		size_t bno = peek_be32(q);
		bit_field_t *map;
		long bmap;
		size_t bit;

		bmap = bno / BIG_BITCOUNT;			/* Bitmap handling this block */
		bit = bno & (BIG_BITCOUNT - 1);		/* Index within bitmap */
		q = const_ptr_add_offset(q, sizeof(guint32));

		/*
		 * It's because of this sanity check that we don't want to consider
		 * the bitcheck field as a huge continuous map.  Also doing that would
		 * violate the encapsulation: we're not supposed to know how bits are
		 * allocated in the field.
		 */

		if (bmap >= dbg->bitmaps)
			continue;

		map = ptr_add_offset(dbg->bitcheck, bmap * BIG_BLKSIZE);
		bit_field_set(map, bit);
	}
}
Example #10
/**
 * Randomly swap 1/128 of the array items.
 */
static void
vsort_perturb_sorted_array(void *array, size_t cnt, size_t isize)
{
	size_t n;
	size_t i;

	n = cnt / 128;

	for (i = 0; i < n; i++) {
		size_t a = random_value(cnt - 1);
		size_t b = random_value(cnt - 1);
		void *x = ptr_add_offset(array, a * isize);
		void *y = ptr_add_offset(array, b * isize);

		SWAP(x, y, isize);
	}
}
Example #11
/**
 * Make sure the vector of block numbers is ordered and points to allocated
 * data that was not already flagged as being used by another key / value.
 *
 * @param what		string describing what is being tested (key or value)
 * @param db		the sdbm database
 * @param bvec		vector where allocated block numbers are stored
 * @param bcnt		amount of blocks in vector
 *
 * @return TRUE on success.
 */
static gboolean
big_file_check(const char *what, DBM *db, const void *bvec, int bcnt)
{
	size_t prev_bno = 0;		/* 0 is invalid: it's the first bitmap */
	const void *q;
	int n;

	if (!big_check_start(db))
		return TRUE;			/* Cannot validate, assume it's OK */

	for (q = bvec, n = bcnt; n > 0; n--) {
		size_t bno = peek_be32(q);
		bit_field_t *map;
		long bmap;
		size_t bit;

		if (!big_block_is_allocated(db, bno)) {
			g_warning("sdbm: \"%s\": "
				"%s from .pag refers to unallocated block %lu in .dat",
				sdbm_name(db), what, (unsigned long) bno);
			return FALSE;
		}
		if (prev_bno != 0 && bno <= prev_bno) {
			g_warning("sdbm: \"%s\": "
				"%s from .pag lists unordered block list (corrupted file?)",
				sdbm_name(db), what);
			return FALSE;
		}
		q = const_ptr_add_offset(q, sizeof(guint32));
		prev_bno = bno;

		/*
		 * Make sure block is not used by someone else.
		 *
		 * Because we mark blocks as used in big keys and values only after
		 * we validated both the key and the value for a given pair, we cannot
		 * detect shared blocks between the key and value of a pair.
		 */

		bmap = bno / BIG_BITCOUNT;			/* Bitmap handling this block */
		bit = bno & (BIG_BITCOUNT - 1);		/* Index within bitmap */

		g_assert(bmap < db->big->bitmaps);

		map = ptr_add_offset(db->big->bitcheck, bmap * BIG_BLKSIZE);
		if (bit_field_get(map, bit)) {
			g_warning("sdbm: \"%s\": "
				"%s from .pag refers to already seen block %lu in .dat",
				sdbm_name(db), what, (unsigned long) bno);
			return FALSE;
		}
	}

	return TRUE;
}
Example #12
/**
 * General tree traversal routine, in depth-first order.
 *
 * The "enter" function is called when we enter a node, and its returned
 * value is monitored: a FALSE indicates that the node should be skipped
 * (which includes its children) and that the action callback should not be
 * invoked.  When missing, it is as if the "enter" callback had returned TRUE.
 *
 * The "action" callback can be invoked before or after processing the node's
 * children, as indicated by the flags. That callback is allowed to free the
 * traversed node.
 *
 * @param tree		the tree descriptor
 * @param root		the item at which traversal starts
 * @param flags		nodes to visit + when to invoke action callback
 * @param curdepth	current depth
 * @param maxdepth	0 = root node only, 1 = root + its children, etc...
 * @param enter		(optional) callback when we enter a node
 * @param action	(mandatory) action on the node
 * @param data		user-defined argument passed to callbacks
 *
 * @return amount of nodes visited.
 */
static size_t
etree_traverse_internal(const etree_t *tree, node_t *root,
	unsigned flags, unsigned curdepth, unsigned maxdepth,
	match_fn_t enter, data_fn_t action, void *data)
{
	size_t visited = 0;
	void *item;
	bool actionable;
	node_t *child;

	etree_check(tree);

	if (curdepth > maxdepth)
		return 0;

	/*
	 * Check whether we need to execute action on the node.
	 */

	child = root->child;	/* Save pointer in case "action" frees node */

	if ((flags & ETREE_TRAVERSE_NON_LEAVES) && NULL != child)
		actionable = TRUE;
	else if ((flags & ETREE_TRAVERSE_LEAVES) && NULL == child)
		actionable = TRUE;
	else
		actionable = FALSE;

	item = ptr_add_offset(root, -tree->offset);

	if (enter != NULL && !(*enter)(item, data))
		return 0;

	if (actionable && (flags & ETREE_CALL_BEFORE))
		(*action)(item, data);		/* MUST NOT free node */

	/*
	 * Only visit children when we've not reached the maximum depth.
	 */

	if (curdepth != maxdepth) {
		node_t *n, *next;

		for (n = child; n != NULL; n = next) {
			next = n->sibling;
			visited += etree_traverse_internal(tree, n, flags,
				curdepth + 1, maxdepth, enter, action, data);
		}
	}

	if (actionable && (flags & ETREE_CALL_AFTER))
		(*action)(item, data);		/* Can safely free node */

	return visited + 1;		/* "+1" for this node */
}
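To make the enter/action contract concrete, here is a hedged sketch of a callback pair that prunes hidden branches and counts visited leaves. The struct entry type and callback names are hypothetical, and their signatures are assumed to match match_fn_t and data_fn_t as used above.

/*
 * Hypothetical callbacks: prune hidden branches on entry, count the leaves
 * that are actually visited.
 */
struct entry {
	node_t node;		/* Embedded linkage, assumed at offset 0 */
	bool hidden;
};

static bool
enter_visible(void *item, void *data)
{
	(void) data;
	return !((struct entry *) item)->hidden;	/* FALSE skips the sub-tree */
}

static void
count_leaf(void *item, void *data)
{
	size_t *leaves = data;

	(void) item;
	(*leaves)++;
}

/*
 * Possible invocation, with (unsigned) -1 standing for "no depth limit":
 *
 *   size_t leaves = 0;
 *   etree_traverse_internal(tree, tree->root, ETREE_TRAVERSE_LEAVES,
 *       0, (unsigned) -1, enter_visible, count_leaf, &leaves);
 */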
Example #13
/**
 * Append child to parent.
 *
 * If this is a frequent operation, consider using an extended tree.
 */
void
etree_append_child(etree_t *tree, void *parent, void *child)
{
	node_t *cn, *pn;

	etree_check(tree);
	g_assert(parent != NULL);
	g_assert(child != NULL);
	g_assert(etree_is_orphan(tree, child));

	cn = ptr_add_offset(child, tree->offset);
	pn = ptr_add_offset(parent, tree->offset);
	
	if (etree_is_extended(tree)) {
		nodex_t *px = ptr_add_offset(parent, tree->offset);

		if (px->last_child != NULL) {
			node_t *lcn = px->last_child;

			g_assert(0 == ptr_cmp(lcn->parent, px));
			g_assert(NULL == lcn->sibling);

			lcn->sibling = cn;
		} else {
			g_assert(NULL == px->child);

			px->child = cn;
		}

		px->last_child = cn;
	} else {
		if (NULL == pn->child) {
			pn->child = cn;
		} else {
			node_t *lcn = etree_node_last_sibling(pn->child);
			lcn->sibling = cn;
		}
	}

	cn->parent = pn;
	tree->count = 0;		/* Tree count is now unknown */
}
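A small usage sketch combining the sibling and child primitives shown so far to build a three-child node. The build_children() helper is hypothetical; it assumes the tree descriptor is already initialized and that a, b and c are orphan items embedding a node_t at tree->offset.

/* Hypothetical helper: build the children list (a, b, c) under root */
static void
build_children(etree_t *tree, void *root, void *a, void *b, void *c)
{
	etree_append_child(tree, root, a);			/* children: a */
	etree_append_child(tree, root, c);			/* children: a, c */
	etree_add_right_sibling(tree, a, b);		/* children: a, b, c */
}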
Example #14
/**
 * @return pointer to last child of item, NULL if leaf item.
 */
void *
etree_last_child(const etree_t *tree, const void *item)
{
	etree_check(tree);

	if (etree_is_extended(tree)) {
		const nodex_t *n = const_ptr_add_offset(item, tree->offset);
		if (NULL == n->last_child)
			return NULL;
		return ptr_add_offset(n->last_child, -tree->offset);
	} else {
		const node_t *n = const_ptr_add_offset(item, tree->offset);
		node_t *sn = etree_node_last_sibling(n->child);

		if (NULL == sn)
			return NULL;

		return ptr_add_offset(sn, -tree->offset);
	}
}
Example #15
/**
 * Fetch value from the hash set.
 *
 * @param ht		the hash table
 * @param key		the key being looked up
 *
 * @return found value, or NULL if not found.
 */
void *
hevset_lookup(const hevset_t *ht, const void *key)
{
	size_t idx;
	void *kptr;

	hevset_check(ht);
	g_assert(key != NULL);

	idx = hash_lookup_key(HASH(ht), key);

	if ((size_t) -1 == idx)
		return NULL;

	kptr = deconstify_pointer(ht->kset.keys[idx]);
	return ptr_add_offset(kptr, -ht->offset);
}
Example #16
/**
 * Look up key in the tree, returning the associated key item if found.
 *
 * @param tree		the red-black tree
 * @param key		pointer to the key structure (NOT a node)
 *
 * @return found item associated with key, NULL if not found.
 */
void *
erbtree_lookup(const erbtree_t *tree, const void *key)
{
	rbnode_t *parent;
	bool is_left;
	rbnode_t *rn;

	erbtree_check(tree);
	g_assert(key != NULL);

	if (erbtree_is_extended(tree)) {
		rn = do_lookup_ext(ERBTREE_E(tree), key, &parent, &is_left);
	} else {
		rn = do_lookup(tree, key, &parent, &is_left);
	}

	return NULL == rn ? NULL : ptr_add_offset(rn, -tree->offset);
}
Example #17
/**
 * Load text symbols from the file into supplied table.
 *
 * @param bc		the BFD context pointing to the file
 * @param st		the symbol table where symbols should be added
 */
static void
bfd_util_load_text(bfd_ctx_t *bc, symbols_t *st)
{
	long i;
	asymbol* empty;
	void *p;

	bfd_ctx_check(bc);
	g_assert(st != NULL);

	if (0 == bc->count)
		return;

	mutex_lock_fast(&bc->lock);

	g_assert(bc->symbols != NULL);

	empty = bfd_make_empty_symbol(bc->handle);
	symbols_lock(st);

	for (
		i = 0, p = bc->symbols;
		i < bc->count;
		i++, p = ptr_add_offset(p, bc->symsize)
	) {
		asymbol *sym;
		symbol_info syminfo;

		sym = bfd_minisymbol_to_symbol(bc->handle, bc->dynamic, p, empty);
		bfd_get_symbol_info(bc->handle, sym, &syminfo);

		if ('T' == syminfo.type || 't' == syminfo.type) {
			const char *name = bfd_asymbol_name(sym);

			if (name != NULL && name[0] != '.') {
				void *addr = ulong_to_pointer(syminfo.value);
				symbols_append(st, addr, name);
			}
		}
	}

	symbols_unlock(st);
	mutex_unlock_fast(&bc->lock);
}
Example #18
/**
 * Fetch key/value from the hash table, returning whether the key exists.
 * If it does, the original value pointer is written to valptr.
 *
 * @param ht		the hash table
 * @param key		the key being looked up
 * @param valptr	if non-NULL, where the original value pointer is written
 *
 * @return whether key exists in the table.
 */
bool
hikset_lookup_extended(const hikset_t *ht, const void *key, void **valptr)
{
	size_t idx;

	hikset_check(ht);

	idx = hash_lookup_key(HASH(ht), &key);

	if ((size_t) -1 == idx)
		return FALSE;

	if (valptr != NULL) {
		void *kptr = deconstify_pointer(ht->kset.keys[idx]);
		*valptr = ptr_add_offset(kptr, -ht->offset);
	}

	return TRUE;
}
Example #19
/**
 * End the bitmap allocation checks that were started through the
 * bigkey_mark_used() and bigval_mark_used() routines.
 *
 * @return the amount of corrections brought to the bitmap, 0 meaning
 * everything was consistent.
 */
size_t
big_check_end(DBM *db)
{
	DBMBIG *dbg = db->big;
	long i;
	size_t adjustments = 0;

	if (NULL == dbg->bitcheck)
		return 0;

	for (i = 0; i < dbg->bitmaps; i++) {
		if (!fetch_bitbuf(db, i)) {
			adjustments += BIG_BITCOUNT;	/* Say, everything was wrong */
		} else {
			guint8 *p = ptr_add_offset(dbg->bitcheck, i * BIG_BLKSIZE);
			guint8 *q = dbg->bitbuf;
			size_t j;
			size_t old_adjustments = adjustments;

			for (j = 0; j < BIG_BLKSIZE; j++, p++, q++) {
				guint8 mismatch = *p ^ *q;
				if (mismatch) {
					adjustments += bits_set(mismatch);
					*q = *p;
				}
			}

			if (old_adjustments != adjustments) {
				size_t adj = adjustments - old_adjustments;

				flush_bitbuf(db);

				g_warning("sdbm: \"%s\": adjusted %lu bit%s in bitmap #%ld",
					sdbm_name(db), (unsigned long) adj, 1 == adj ? "" : "s", i);
			}
		}
	}

	HFREE_NULL(dbg->bitcheck);

	return adjustments;
}
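Putting the bitmap-checking helpers together, a hedged sketch of the overall lifecycle: big_check_start() builds the zeroed in-memory maps, each validated key or value gets its blocks marked, and big_check_end() reconciles the maps with the on-disk bitmaps. In the real flow the start/end calls bracket a pass over the whole .pag file; check_one_entry() below compresses that into one hypothetical call for illustration.

/*
 * Hypothetical single-entry checking pass.  In practice big_check_start()
 * is called once, every big key / value of the .pag file is validated and
 * marked, and big_check_end() runs last to reconcile the bitmaps.
 */
static size_t
check_one_entry(DBM *db, const void *bvec, int bcnt)
{
	if (!big_check_start(db))
		return 0;				/* Cannot check, nothing adjusted */

	if (big_file_check("key", db, bvec, bcnt))
		big_file_mark_used(db, bvec, bcnt);	/* Blocks now flagged as seen */

	return big_check_end(db);	/* Amount of bitmap corrections made */
}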
Example #20
/**
 * Computes the root of the tree, starting from any item.
 *
 * This disregards the actual root in the etree_t structure passed, which may
 * be inaccurate, i.e. a sub-node of the actual tree.  The only accurate
 * information that etree_t must contain is the offset of the node_t within
 * the items.
 *
 * @param tree		the tree descriptor (with possible inaccurate root)
 * @param item		an item belonging to the tree
 *
 * @return the root of the tree to which item belongs.
 */
void *
etree_find_root(const etree_t *tree, const void *item)
{
	const node_t *n, *p;
	void *root;

	etree_check(tree);
	g_assert(item != NULL);

	n = const_ptr_add_offset(item, tree->offset);

	for (p = n; p != NULL; p = n->parent)
		n = p;

	root = ptr_add_offset(deconstify_pointer(n), -tree->offset);

	g_assert(etree_is_orphan(tree, root));	/* No parent, no sibling */

	return root;
}
Example #21
/**
 * Minimal pseudo-random number generation, combining a simple PRNG with
 * past-collected entropy.
 *
 * @return a 31-bit random number.
 */
static int
entropy_rand31(void)
{
	int result;
	static size_t offset;

	result = rand31();

	/*
	 * Combine with previously generated entropy to create even better
	 * randomness.  That previous entropy is refreshed each time a new
	 * entropy collection cycle is initiated.  We simply loop over the
	 * five 32-bit words, interpreted in a big-endian way.
	 */

	result += peek_be32(ptr_add_offset(&entropy_previous, offset));
	offset = (offset + 4) % sizeof entropy_previous;

	return result & RAND31_MASK;
}
Example #22
/**
 * Fetch value from the hash set.
 *
 * @param ht		the hash table
 * @param key		the key being looked up
 *
 * @return found value, or NULL if not found.
 */
void *
hikset_lookup(const hikset_t *ht, const void *key)
{
	size_t idx;
	void *kptr;

	hikset_check(ht);

	idx = hash_lookup_key(HASH(ht), &key);

	if ((size_t) -1 == idx)
		return NULL;

	/*
	 * We stored a pointer to the key in the value structure.
	 * To get the start of the value, we simply offset that pointer.
	 */

	kptr = deconstify_pointer(ht->kset.keys[idx]);
	return ptr_add_offset(kptr, -ht->offset);
}
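A sketch of the layout this lookup assumes: the key lives inside the value structure, the set stores a pointer to that embedded key field, and the value is recovered by subtracting the field offset. struct record and record_payload() are hypothetical, and the set is assumed to have been created over string keys at offsetof(struct record, name).

/*
 * Hypothetical value type: "name" is the key, and the set is assumed to
 * have been created with offsetof(struct record, name) as its key offset.
 */
struct record {
	const char *name;	/* Key field: the set stores a pointer to it */
	int payload;
};

static int
record_payload(const hikset_t *ht, const char *name)
{
	struct record *r = hikset_lookup(ht, name);

	return NULL == r ? -1 : r->payload;		/* -1 if the name is unknown */
}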
Example #23
/**
 * Fetch next entry from iterator.
 *
 * @param hxi	the hash table iterator
 * @param vp	where value is written, if non-NULL
 *
 * @return TRUE if a new entry exists, FALSE otherwise.
 */
bool
hikset_iter_next(hikset_iter_t *hxi, void **vp)
{
	const hikset_t *hx;

	hikset_iter_check(hxi);

	hx = hxi->hx;

	while (hxi->pos < hx->kset.size && !HASH_IS_REAL(hx->kset.hashes[hxi->pos]))
		hxi->pos++;

	if (hxi->pos >= hx->kset.size)
		return FALSE;

	if (vp != NULL) {
		void *kptr = deconstify_pointer(hx->kset.keys[hxi->pos]);
		*vp = ptr_add_offset(kptr, -hx->offset);
	}

	hxi->pos++;
	return TRUE;
}
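A typical iteration loop built on top of this routine might look as follows. Only hikset_iter_next() appears in the code above; the iterator constructor and destructor names used here are assumptions.

/*
 * Hypothetical iteration loop; hikset_iter_new() and hikset_iter_release()
 * are assumed constructor / destructor names.
 */
static size_t
count_values(const hikset_t *hx)
{
	hikset_iter_t *hxi = hikset_iter_new(hx);
	void *value;
	size_t n = 0;

	while (hikset_iter_next(hxi, &value))
		n++;				/* "value" points to the start of each value */

	hikset_iter_release(&hxi);
	return n;
}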
Example #24
/**
 * Traverse table, invoking callback for each entry.
 *
 * @param hx	the hash table
 * @param fn	callback to invoke
 * @param data	additional callback parameter
 */
void
hikset_foreach(const hikset_t *hx, data_fn_t fn, void *data)
{
	unsigned *hp, *end;
	size_t i, n;

	hikset_check(hx);

	end = &hx->kset.hashes[hx->kset.size];
	hash_refcnt_inc(HASH(hx));		/* Prevent any key relocation */

	for (i = n = 0, hp = hx->kset.hashes; hp != end; i++, hp++) {
		if (HASH_IS_REAL(*hp)) {
			void *kptr = deconstify_pointer(hx->kset.keys[i]);
			(*fn)(ptr_add_offset(kptr, -hx->offset), data);
			n++;
		}
	}

	g_assert(n == hx->kset.items);

	hash_refcnt_dec(HASH(hx));
}
Example #25
/**
 * Collect entropy by randomly feeding values from array.
 */
static void
entropy_array_data_collect(SHA1Context *ctx,
	enum entropy_data data, void *ary, size_t len, size_t elem_size)
{
	size_t i;
	void *p;

	g_assert(ctx != NULL);
	g_assert(ary != NULL);
	g_assert(size_is_non_negative(len));
	g_assert(size_is_positive(elem_size));

	entropy_array_shuffle(ary, len, elem_size);

	for (i = 0, p = ary; i < len; i++, p = ptr_add_offset(p, elem_size)) {
		switch (data) {
		case ENTROPY_ULONG:
			sha1_feed_ulong(ctx, *(unsigned long *) p);
			break;
		case ENTROPY_STRING:
			sha1_feed_string(ctx, *(char **) p);
			break;
		case ENTROPY_STAT:
			sha1_feed_stat(ctx, *(char **) p);
			break;
		case ENTROPY_FSTAT:
			sha1_feed_fstat(ctx, *(int *) p);
			break;
		case ENTROPY_DOUBLE:
			sha1_feed_double(ctx, *(double *) p);
			break;
		case ENTROPY_POINTER:
			sha1_feed_pointer(ctx, *(void **) p);
			break;
		}
	}
}
Example #26
/**
 * Traverse table, invoking callback for each entry and removing it when
 * the callback function returns TRUE.
 *
 * @param hx	the hash table
 * @param fn	callback to invoke
 * @param data	additional callback parameter
 *
 * @return the number of entries removed from the hash table.
 */
size_t
hikset_foreach_remove(hikset_t *hx, data_rm_fn_t fn, void *data)
{
	unsigned *hp, *end;
	size_t i, n, nr;

	hikset_check(hx);

	end = &hx->kset.hashes[hx->kset.size];
	hash_refcnt_inc(HASH(hx));		/* Prevent any key relocation */

	for (i = n = nr = 0, hp = hx->kset.hashes; hp != end; i++, hp++) {
		if (HASH_IS_REAL(*hp)) {
			void *kptr = deconstify_pointer(hx->kset.keys[i]);
			bool r = (*fn)(ptr_add_offset(kptr, -hx->offset), data);
			n++;
			if (r) {
				nr++;
				hash_keyset_erect_tombstone(&hx->kset, i);
				hx->stamp++;
			}
		}
	}

	g_assert(n == hx->kset.items);
	g_assert(nr <= hx->kset.items);

	hash_refcnt_dec(HASH(hx));

	hx->kset.items -= nr;

	if (nr != 0)
		hash_resize_as_needed(HASH(hx));

	return nr;
}
Example #27
/**
 * Detach item and all its sub-tree from a tree, making it the new root
 * of a smaller tree.
 */
void
etree_detach(etree_t *tree, void *item)
{
	node_t *n;

	etree_check(tree);
	g_assert(item != NULL);

	n = ptr_add_offset(item, tree->offset);

	if (NULL == n->parent) {
		/* Root node already, cannot have any sibling */
		g_assert(NULL == n->sibling);
		g_assert(n == tree->root);
		tree->root = NULL;			/* Root node is now detached */
		tree->count = 0;
	} else {
		node_t *parent = n->parent;

		if (n == parent->child) {
			if (etree_is_extended(tree)) {
				nodex_t *px = (nodex_t *) parent;
				if (n == px->last_child) {
					g_assert(NULL == n->sibling);	/* Last child! */
					px->child = px->last_child = NULL;
				} else {
					g_assert(NULL != n->sibling);	/* Not last child */
					parent->child = n->sibling;
				}
			} else {
				parent->child = n->sibling;
			}
		} else {
			node_t *cn;
			bool found = FALSE;

			/*
			 * Not removing first child of parent, so locate its previous
			 * sibling in the list of the parent's immediate children.
			 */

			for (cn = parent->child; cn != NULL; cn = cn->sibling) {
				if (cn->sibling == n) {
					found = TRUE;
					break;
				}
			}

			g_assert(found);			/* Must find it or tree is corrupted */

			cn->sibling = n->sibling;	/* Remove node ``n'' from list */

			/*
			 * Update ``last_child'' in the parent node if we removed the
			 * last child node.
			 */

			if (etree_is_extended(tree)) {
				nodex_t *px = (nodex_t *) parent;

				if (n == px->last_child) {
					g_assert(NULL == n->sibling);
					px->last_child = cn;
				}
			}
		}

		n->sibling = NULL;			/* Node is now a root node */
		tree->count = 0;			/* Count is now unknown */
	}
}
Example #28
/**
 * Allocate blocks (consecutive if possible) from the .dat file.
 * Block numbers are written back in the specified vector, in sequence.
 *
 * Blocks are always allocated with increasing block numbers, i.e. the list
 * of block numbers returned is guaranteed to be sorted.  This will help
 * upper layers to quickly determine whether all the blocks are contiguous
 * for instance.
 *
 * The file is extended as necessary to be able to allocate the blocks but
 * this is only done when there are no more free blocks available.
 *
 * @param db		the sdbm database
 * @param bvec		vector where allocated block numbers will be stored
 * @param bcnt		amount of blocks in vector (amount to allocate)
 *
 * @return TRUE if we were able to allocate all the requested blocks.
 */
static gboolean
big_file_alloc(DBM *db, void *bvec, int bcnt)
{
	DBMBIG *dbg = db->big;
	size_t first;
	int n;
	void *q;
	int bmap = 0;		/* Initial bitmap from which we allocate */

	g_assert(bcnt > 0);
	g_return_val_if_fail(NULL == dbg->bitcheck, FALSE);

	if (-1 == dbg->fd && -1 == big_open(dbg))
		return FALSE;

	/*
	 * First try to allocate all the blocks sequentially.
	 */

retry:

	first = big_falloc_seq(db, bmap, bcnt);
	if (first != 0) {
		while (bcnt-- > 0) {
			bvec = poke_be32(bvec, first++);
		}
		goto success;
	}

	/*
	 * There are no "bcnt" consecutive free blocks in the file.
	 *
	 * Before extending the file, we're going to fill the holes as much
	 * as possible.
	 */

	for (first = 0, q = bvec, n = bcnt; n > 0; n--) {
		first = big_falloc(db, first + 1);
		if (0 == first)
			break;
		q = poke_be32(q, first);
	}

	if (0 == n)
		goto success;		/* Found the requested "bcnt" free blocks */

	/*
	 * Free the incompletely allocated blocks: since we're about to extend
	 * the file, we'll use consecutive blocks from the new chunk governed
	 * by the added empty bitmap.
	 */

	for (q = bvec, n = bcnt - n; n > 0; n--) {
		first = peek_be32(q);
		big_ffree(db, first);
		q = ptr_add_offset(q, sizeof(guint32));
	}

	/*
	 * Extend the file by allocating another bitmap.
	 */

	g_assert(0 == bmap);		/* Never retried yet */

	if (dbg->bitbuf_dirty && !flush_bitbuf(db))
		return FALSE;

	memset(dbg->bitbuf, 0, BIG_BLKSIZE);
	bit_field_set(dbg->bitbuf, 0);	/* First page is the bitmap itself */
	dbg->bitbno = dbg->bitmaps * BIG_BITCOUNT;
	dbg->bitmaps++;

	/*
	 * Now retry starting to allocate blocks from the newly added bitmap.
	 *
	 * This will likely succeed if we're trying to allocate less than 8 MiB
	 * worth of data (with 1 KiB blocks).
	 */

	bmap = dbg->bitmaps - 1;
	goto retry;

success:
	/*
	 * We successfully allocated blocks from the bitmap.
	 *
	 * If the database is not volatile, we need to flush the bitmap to disk
	 * immediately in case of a crash, to avoid reusing these parts of the file.
	 */

	if (!db->is_volatile && dbg->bitbuf_dirty && !flush_bitbuf(db)) {
		/* Cannot flush -> cannot allocate the blocks: free them */
		for (q = bvec, n = bcnt; n > 0; n--) {
			first = peek_be32(q);
			big_ffree(db, first);
			q = ptr_add_offset(q, sizeof(guint32));
		}
		return FALSE;
	}

	return TRUE;		/* Succeeded */
}
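A hedged sketch of a caller consuming the returned vector: block numbers are written big-endian, in increasing order, and are never zero since block 0 is always a bitmap page. The allocate_three() helper is hypothetical; peek_be32() and const_ptr_add_offset() are the accessors used throughout these examples.

/* Hypothetical caller: allocate three blocks and walk the returned vector */
static gboolean
allocate_three(DBM *db)
{
	char bvec[3 * sizeof(guint32)];
	const void *q = bvec;
	int n;

	if (!big_file_alloc(db, bvec, 3))
		return FALSE;			/* Allocation or bitmap flush failed */

	for (n = 0; n < 3; n++) {
		size_t bno = peek_be32(q);

		q = const_ptr_add_offset(q, sizeof(guint32));
		g_assert(bno != 0);		/* Block 0 is always a bitmap page */
	}

	return TRUE;
}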
Example #29
/**
 * Check whether the semi-reliable UDP header corresponds to valid traffic.
 *
 * This routine is only used for ambiguous traffic that looks like both
 * Gnutella and semi-reliable UDP: we want to make sure we're not mistaking
 * a legitimate semi-reliable fragment / ACK for a Gnutella message.
 *
 * @param utp		already classified semi-reliable protocol
 * @param s			socket which received the message
 * @param data		received data
 * @param len		length of data
 *
 * @return TRUE if message corresponds to valid semi-reliable UDP traffic.
 */
static bool
udp_is_valid_semi_reliable(enum udp_traffic utp, const gnutella_socket_t *s,
	const void *data, size_t len)
{
	struct ut_header uth;
	void *message = NULL;
	size_t msglen;
	bool valid = TRUE;

	/*
	 * Since we're talking about an ambiguous message, it is highly unlikely
	 * we'll ever be called with an acknowledgement: they should have been
	 * ruled out earlier as improbable since ACKs are short messages, much
	 * shorter than a Gnutella header typically.
	 *
	 * So we'll only handle fragments for now, assuming ACKs are legitimate.
	 */

	gnet_stats_inc_general(GNR_UDP_AMBIGUOUS_DEEPER_INSPECTION);

	uth.count = udp_reliable_header_get_count(data);
	if (0 == uth.count)
		return TRUE;		/* Acknowledgments */

	uth.part = udp_reliable_header_get_part(data) - 1;	/* Zero-based */
	uth.flags = udp_reliable_header_get_flags(data);
	uth.seqno = udp_reliable_header_get_seqno(data);

	/*
	 * We're going to ask the RX layer about the message: is it a known
	 * sequence ID for this host?
	 *
	 * This works only for messages with more than one fragment, of course,
	 * but chances are that, for these, we have already received another
	 * fragment which was not mistaken for a Gnutella message...
	 *
	 * This is OK for acknowledged fragments: we're not going to acknowledge
	 * the unprocessed fragment, but we'll receive other fragments of the
	 * message, and later on we'll get a retransmission of the unprocessed
	 * fragment, which this time will be validated since we have already
	 * partially received the message.
	 */

	if (uth.count > 1) {
		rxdrv_t *rx;
		gnet_host_t from;

		gnet_host_set(&from, s->addr, s->port);
		rx = udp_get_rx_semi_reliable(utp, s->addr, len);

		return NULL == rx ? FALSE : ut_valid_message(rx, &uth, &from);
	}

	/*
	 * We're facing a single-fragment message.
	 *
	 * We can trivially probe it and validate it to see whether it can still
	 * be interpreted as a valid Gnutella message on its own...  If the answer
	 * is yes, then we can assert we're facing a valid semi-reliable UDP
	 * message.
	 *
	 * For deflated payloads, we already validated that the start of the
	 * payload is a well-formed zlib header, but we'll attempt inflation anyway
	 * so we will know for sure whether it's a valid message!
	 *
	 * Of course we're doing work here that will have to be redone later when
	 * processing the message, but this is for proper classification and does
	 * not happen very often: only on a very small fraction of messages for
	 * which there is a high level of ambiguity.
	 */

	g_assert(0 == uth.part);	/* First (and only) fragment */

	if (uth.flags & UDP_RF_DEFLATED) {
		int outlen = settings_max_msg_size();
		int ret;

		message = xmalloc(outlen);
		
		ret = zlib_inflate_into(
			const_ptr_add_offset(data, UDP_RELIABLE_HEADER_SIZE),
			len - UDP_RELIABLE_HEADER_SIZE,
			message, &outlen);

		if (ret != Z_OK) {
			valid = FALSE;		/* Does not inflate properly */
			goto done;
		}

		msglen = outlen;
	} else {
		message = ptr_add_offset(
			deconstify_pointer(data), UDP_RELIABLE_HEADER_SIZE);
		msglen = len - UDP_RELIABLE_HEADER_SIZE;
	}

	switch (utp) {
	case SEMI_RELIABLE_GTA:
		/*
		 * Assume message is valid if the Gnutella size header is consistent
		 * with the length of the whole message.
		 */

		{
			uint16 size;

			switch (gmsg_size_valid(message, &size)) {
			case GMSG_VALID:
			case GMSG_VALID_MARKED:
				break;
			case GMSG_VALID_NO_PROCESS: /* Header flags undefined for now */
			case GMSG_INVALID:
				valid = FALSE;
				goto done;
			}

			valid = (size_t) size + GTA_HEADER_SIZE == msglen;
		}
		break;
	case SEMI_RELIABLE_GND:
		valid = TRUE;			/* For now */
		break;
	case GNUTELLA:
	case DHT:
	case RUDP:
	case UNKNOWN:
		g_assert_not_reached();
	}

done:
	if (uth.flags & UDP_RF_DEFLATED)
		xfree(message);

	return valid;
}
Example #30
/**
 * Insert node in tree.
 *
 * @return the existing key if the key already existed, NULL if the node
 * was properly inserted.
 */
void * G_HOT
erbtree_insert(erbtree_t *tree, rbnode_t *node)
{
	rbnode_t *key, *parent;
	const void *kbase;
	bool is_left;

	erbtree_check(tree);
	g_assert(node != NULL);

	kbase = const_ptr_add_offset(node, -tree->offset);

	if (erbtree_is_extended(tree)) {
		key = do_lookup_ext(ERBTREE_E(tree), kbase, &parent, &is_left);
	} else {
		key = do_lookup(tree, kbase, &parent, &is_left);
	}

	if (key != NULL)
		return ptr_add_offset(key, -tree->offset);

	g_assert(!is_valid(node));	/* Not yet part of the tree */

	node->left = NULL;
	node->right = NULL;
	set_color(node, RB_RED);
	set_parent(node, parent);
	tree->count++;

	if (parent != NULL) {
		if (is_left) {
			if (parent == tree->first)
				tree->first = node;
		} else {
			if (parent == tree->last)
				tree->last = node;
		}
		set_child(parent, node, is_left);
	} else {
		tree->root = node;
		tree->first = node;
		tree->last = node;
	}

	/*
	 * Fixup the modified tree by recoloring nodes and performing
	 * rotations (2 at most) so that the red-black tree properties are
	 * preserved.
	 */

	while (NULL != (parent = get_parent(node)) && is_red(parent)) {
		rbnode_t *grandpa = get_parent(parent);

		if (parent == grandpa->left) {
			rbnode_t *uncle = grandpa->right;

			if (uncle != NULL && is_red(uncle)) {
				set_color(parent, RB_BLACK);
				set_color(uncle, RB_BLACK);
				set_color(grandpa, RB_RED);
				node = grandpa;
			} else {
				if (node == parent->right) {
					rotate_left(tree, parent);
					node = parent;
					parent = get_parent(node);
				}
				set_color(parent, RB_BLACK);
				set_color(grandpa, RB_RED);
				rotate_right(tree, grandpa);
			}
		} else {
			rbnode_t *uncle = grandpa->left;

			if (uncle != NULL && is_red(uncle)) {
				set_color(parent, RB_BLACK);
				set_color(uncle, RB_BLACK);
				set_color(grandpa, RB_RED);
				node = grandpa;
			} else {
				if (node == parent->left) {
					rotate_right(tree, parent);
					node = parent;
					parent = get_parent(node);
				}
				set_color(parent, RB_BLACK);
				set_color(grandpa, RB_RED);
				rotate_left(tree, grandpa);
			}
		}
	}
	set_color(tree->root, RB_BLACK);
	return NULL;
}
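Since the function returns the existing item on a duplicate key, insertion and duplicate detection can be done in a single call. A minimal sketch, assuming a hypothetical struct span whose embedded rbnode_t offset and comparison routine were given to the tree at initialization time:

/*
 * Hypothetical item type: the tree is assumed to have been initialized with
 * offsetof(struct span, node) and a comparison routine ordering "start".
 */
struct span {
	rbnode_t node;		/* Embedded red-black node */
	int start;			/* Key used by the tree's comparison callback */
};

static bool
span_insert(erbtree_t *tree, struct span *s)
{
	struct span *old = erbtree_insert(tree, &s->node);

	return NULL == old;		/* FALSE when an equal key was already present */
}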