static struct links_entry *
insert_entry(struct archive_entry_linkresolver *res,
    struct archive_entry *entry)
{
	struct links_entry *le;
	size_t hash, bucket;

	/* Add this entry to the links cache. */
	le = calloc(1, sizeof(struct links_entry));
	if (le == NULL)
		return (NULL);
	le->canonical = archive_entry_clone(entry);

	/* If the links cache is getting too full, enlarge the hash table. */
	if (res->number_entries > res->number_buckets * 2)
		grow_hash(res);

	hash = (size_t)(archive_entry_dev(entry) ^ archive_entry_ino64(entry));
	bucket = hash & (res->number_buckets - 1);

	/* Record the new entry at the head of its bucket. */
	if (res->buckets[bucket] != NULL)
		res->buckets[bucket]->previous = le;
	res->number_entries++;
	le->next = res->buckets[bucket];
	le->previous = NULL;
	res->buckets[bucket] = le;
	le->hash = hash;
	le->links = archive_entry_nlink(entry) - 1;
	return (le);
}
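
A note on the bucket selection above: masking with (number_buckets - 1) only matches a modulo when the bucket count is a power of two; the doubling in grow_hash preserves that property as long as the initial count is a power of two. A small standalone sketch of the equivalence, with arbitrary example values:

#include <assert.h>
#include <stddef.h>

/* Standalone illustration (not part of the resolver sources): for a
 * power-of-two bucket count n, masking with (n - 1) picks the same
 * bucket as hash % n, without the division. */
static size_t
bucket_of(size_t hash, size_t number_buckets)
{
	return (hash & (number_buckets - 1));
}

int
main(void)
{
	size_t n = 128;                  /* example power-of-two bucket count */
	size_t hash = (size_t)0x2f6e4d3c; /* arbitrary example hash value */

	assert(bucket_of(hash, n) == hash % n);
	return (0);
}
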
Example #2
int hash_add(struct hash_table *hash_tbl, const void *key, void *data)
{
	int index;
	struct hashlist_element *element;

	element = (struct hashlist_element *)malloc(sizeof(struct hashlist_element));
	if (!element) {
		dprintf(LOG_ERR, "Could not malloc hashlist_element");
		return (-1);
	}
	if (hash_full(hash_tbl)) {
		grow_hash(hash_tbl);
	}
	index = hash_tbl->hash_function(key) % hash_tbl->hash_size;
	if (hash_search(hash_tbl, key)) {
		dprintf(LOG_DEBUG, "hash_add: duplicated item");
		/* Don't leak the element allocated above. */
		free(element);
		return HASH_COLLISION;
	}
	/* Link the new element at the head of its bucket. */
	element->next = hash_tbl->hash_list[index];
	hash_tbl->hash_list[index] = element;
	element->data = data;
	hash_tbl->hash_count++;
	return 0;
}
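
hash_full() and grow_hash() are referenced above but not included in this example. A minimal sketch of what a load-factor check in that style could look like; the struct layout is inferred from the fields hash_add() uses, and the grow-at-one-element-per-bucket threshold is an assumption, not taken from the project:

/* Sketch only: field names are inferred from hash_add() above; the real
 * struct hash_table and hash_full() in the project may differ. */
struct hashlist_element {
	void *data;
	struct hashlist_element *next;
};

struct hash_table {
	int hash_size;                         /* number of buckets */
	int hash_count;                        /* number of stored elements */
	struct hashlist_element **hash_list;   /* bucket head pointers */
	int (*hash_function)(const void *key); /* maps a key to an int */
};

/* Assumed policy: grow once there are as many elements as buckets. */
static int
hash_full(struct hash_table *hash_tbl)
{
	return (hash_tbl->hash_count >= hash_tbl->hash_size);
}
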
static struct links_entry *
find_entry(struct archive_entry_linkresolver *res,
    struct archive_entry *entry)
{
	struct links_entry	*le;
	int			 hash, bucket;
	dev_t			 dev;
#ifndef __minix
	int64_t			 ino;
#else
	int32_t			ino;
#endif

	/* Free a held entry. */
	if (res->spare != NULL) {
		archive_entry_free(res->spare->canonical);
		archive_entry_free(res->spare->entry);
		free(res->spare);
		res->spare = NULL;
	}

	/* If the links cache overflowed and got flushed, don't bother. */
	if (res->buckets == NULL)
		return (NULL);

	dev = archive_entry_dev(entry);
#ifndef __minix
	ino = archive_entry_ino64(entry);
#else
	ino = archive_entry_ino(entry);
#endif
	hash = (int)(dev ^ ino);

	/* Try to locate this entry in the links cache. */
	bucket = hash % res->number_buckets;
	for (le = res->buckets[bucket]; le != NULL; le = le->next) {
#ifndef __minix
		if (le->hash == hash
		    && dev == archive_entry_dev(le->canonical)
		    && ino == archive_entry_ino64(le->canonical)) {
#else
		if (le->hash == hash
		    && dev == archive_entry_dev(le->canonical)
		    && ino == archive_entry_ino(le->canonical)) {
#endif
			/*
			 * Decrement link count each time and release
			 * the entry if it hits zero.  This saves
			 * memory and is necessary for detecting
			 * missed links.
			 */
			--le->links;
			if (le->links > 0)
				return (le);
			/* Remove it from this hash bucket. */
			if (le->previous != NULL)
				le->previous->next = le->next;
			if (le->next != NULL)
				le->next->previous = le->previous;
			if (res->buckets[bucket] == le)
				res->buckets[bucket] = le->next;
			res->number_entries--;
			/* Defer freeing this entry. */
			res->spare = le;
			return (le);
		}
	}
	return (NULL);
}


static struct links_entry *
next_entry(struct archive_entry_linkresolver *res)
{
	struct links_entry	*le;
	size_t			 bucket;

	/* Free a held entry. */
	if (res->spare != NULL) {
		archive_entry_free(res->spare->canonical);
		free(res->spare);
		res->spare = NULL;
	}

	/* If the links cache overflowed and got flushed, don't bother. */
	if (res->buckets == NULL)
		return (NULL);

	/* Look for next non-empty bucket in the links cache. */
	for (bucket = 0; bucket < res->number_buckets; bucket++) {
		le = res->buckets[bucket];
		if (le != NULL) {
			/* Remove it from this hash bucket. */
			if (le->next != NULL)
				le->next->previous = le->previous;
			res->buckets[bucket] = le->next;
			res->number_entries--;
			/* Defer freeing this entry. */
			res->spare = le;
			return (le);
		}
	}
	return (NULL);
}

static struct links_entry *
insert_entry(struct archive_entry_linkresolver *res,
    struct archive_entry *entry)
{
	struct links_entry	*le;
	int			 hash, bucket;

	/* Add this entry to the links cache. */
	le = malloc(sizeof(struct links_entry));
	if (le == NULL)
		return (NULL);
	memset(le, 0, sizeof(*le));
	le->canonical = archive_entry_clone(entry);

	/* If the links cache is getting too full, enlarge the hash table. */
	if (res->number_entries > res->number_buckets * 2)
		grow_hash(res);

#ifndef __minix
	hash = archive_entry_dev(entry) ^ archive_entry_ino64(entry);
#else
	hash = ((int)archive_entry_dev(entry)) ^ ((int)archive_entry_ino(entry));
#endif
	bucket = hash % res->number_buckets;

	/* Record the new entry at the head of its bucket. */
	if (res->buckets[bucket] != NULL)
		res->buckets[bucket]->previous = le;
	res->number_entries++;
	le->next = res->buckets[bucket];
	le->previous = NULL;
	res->buckets[bucket] = le;
	le->hash = hash;
	le->links = archive_entry_nlink(entry) - 1;
	return (le);
}

static void
grow_hash(struct archive_entry_linkresolver *res)
{
	struct links_entry *le, **new_buckets;
	size_t new_size;
	size_t i, bucket;

	/* Try to enlarge the bucket list. */
	new_size = res->number_buckets * 2;
	new_buckets = malloc(new_size * sizeof(struct links_entry *));

	if (new_buckets != NULL) {
		memset(new_buckets, 0,
		    new_size * sizeof(struct links_entry *));
		for (i = 0; i < res->number_buckets; i++) {
			while (res->buckets[i] != NULL) {
				/* Remove entry from old bucket. */
				le = res->buckets[i];
				res->buckets[i] = le->next;

				/* Add entry to new bucket. */
				bucket = le->hash % new_size;

				if (new_buckets[bucket] != NULL)
					new_buckets[bucket]->previous =
					    le;
				le->next = new_buckets[bucket];
				le->previous = NULL;
				new_buckets[bucket] = le;
			}
		}
		free(res->buckets);
		res->buckets = new_buckets;
		res->number_buckets = new_size;
	}
}
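
These static helpers are normally driven through libarchive's public link-resolver API rather than called directly. A sketch of that usage against a current libarchive (link with -larchive); the pathnames, dev/ino values, and the ustar strategy are arbitrary example choices:

#include <archive.h>
#include <archive_entry.h>
#include <stdio.h>

int
main(void)
{
	struct archive_entry_linkresolver *res;
	struct archive_entry *e1, *e2, *sparse;

	res = archive_entry_linkresolver_new();
	if (res == NULL)
		return (1);
	/* Tar-family formats use the "old tar" strategy internally. */
	archive_entry_linkresolver_set_strategy(res, ARCHIVE_FORMAT_TAR_USTAR);

	/* Two regular files sharing one dev/ino pair, each with nlink == 2. */
	e1 = archive_entry_new();
	archive_entry_set_pathname(e1, "first");
	archive_entry_set_filetype(e1, AE_IFREG);
	archive_entry_set_dev(e1, 1);
	archive_entry_set_ino64(e1, 42);
	archive_entry_set_nlink(e1, 2);

	e2 = archive_entry_new();
	archive_entry_set_pathname(e2, "second");
	archive_entry_set_filetype(e2, AE_IFREG);
	archive_entry_set_dev(e2, 1);
	archive_entry_set_ino64(e2, 42);
	archive_entry_set_nlink(e2, 2);

	/* The first call caches the entry (insert_entry); the second call
	 * matches the cached one (find_entry) and records e2 as a hardlink
	 * to e1's pathname. */
	sparse = NULL;
	archive_entry_linkify(res, &e1, &sparse);
	archive_entry_linkify(res, &e2, &sparse);
	printf("hardlink target of second entry: %s\n",
	    archive_entry_hardlink(e2));

	archive_entry_free(e1);
	archive_entry_free(e2);
	archive_entry_linkresolver_free(res);
	return (0);
}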