static struct links_entry *
find_entry(struct archive_entry_linkresolver *res,
    struct archive_entry *entry)
{
	struct links_entry	*le;
	size_t			 hash, bucket;
	dev_t			 dev;
	int64_t			 ino;

	/* Free a held entry. */
	if (res->spare != NULL) {
		archive_entry_free(res->spare->canonical);
		archive_entry_free(res->spare->entry);
		free(res->spare);
		res->spare = NULL;
	}

	dev = archive_entry_dev(entry);
	ino = archive_entry_ino64(entry);
	hash = (size_t)(dev ^ ino);

	/* Try to locate this entry in the links cache. */
	bucket = hash & (res->number_buckets - 1);
	for (le = res->buckets[bucket]; le != NULL; le = le->next) {
		if (le->hash == hash
		    && dev == archive_entry_dev(le->canonical)
		    && ino == archive_entry_ino64(le->canonical)) {
			/*
			 * Decrement link count each time and release
			 * the entry if it hits zero.  This saves
			 * memory and is necessary for detecting
			 * missed links.
			 */
			--le->links;
			if (le->links > 0)
				return (le);
			/* Remove it from this hash bucket. */
			if (le->previous != NULL)
				le->previous->next = le->next;
			if (le->next != NULL)
				le->next->previous = le->previous;
			if (res->buckets[bucket] == le)
				res->buckets[bucket] = le->next;
			res->number_entries--;
			/* Defer freeing this entry. */
			res->spare = le;
			return (le);
		}
	}
	return (NULL);
}
static struct links_entry *
insert_entry(struct archive_entry_linkresolver *res,
    struct archive_entry *entry)
{
	struct links_entry *le;
	size_t hash, bucket;

	/* Add this entry to the links cache. */
	le = calloc(1, sizeof(struct links_entry));
	if (le == NULL)
		return (NULL);
	le->canonical = archive_entry_clone(entry);

	/* If the links cache is getting too full, enlarge the hash table. */
	if (res->number_entries > res->number_buckets * 2)
		grow_hash(res);

	hash = (size_t)(archive_entry_dev(entry) ^ archive_entry_ino64(entry));
	bucket = hash & (res->number_buckets - 1);

	/* If we could allocate the entry, record it. */
	if (res->buckets[bucket] != NULL)
		res->buckets[bucket]->previous = le;
	res->number_entries++;
	le->next = res->buckets[bucket];
	le->previous = NULL;
	res->buckets[bucket] = le;
	le->hash = hash;
	le->links = archive_entry_nlink(entry) - 1;
	return (le);
}
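Note: the struct definitions behind these two helpers are not part of this excerpt. Below is a minimal sketch of what they appear to assume, inferred purely from the field accesses above; it is an illustration, not the real libarchive declarations.

/* Hypothetical layout inferred from find_entry()/insert_entry() above;
 * not copied from libarchive's headers. */
struct links_entry {
	struct links_entry	*next;		/* Doubly linked bucket chain. */
	struct links_entry	*previous;
	size_t			 hash;		/* Cached (dev ^ ino) value. */
	int			 links;		/* Hard links still to be seen. */
	struct archive_entry	*canonical;	/* Clone of the first entry seen. */
	struct archive_entry	*entry;		/* Held entry, freed lazily via spare. */
};

struct archive_entry_linkresolver {
	struct links_entry	**buckets;	/* Hash table; chains of links_entry. */
	struct links_entry	 *spare;	/* Entry whose freeing was deferred. */
	size_t			  number_entries;
	size_t			  number_buckets;	/* Kept a power of two so that
							 * hash & (number_buckets - 1) is valid. */
};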
static int
record_hardlink(struct archive_read *a,
    struct cpio *cpio, struct archive_entry *entry)
{
	struct links_entry      *le;
	dev_t dev;
	int64_t ino;

	if (archive_entry_nlink(entry) <= 1)
		return (ARCHIVE_OK);

	dev = archive_entry_dev(entry);
	ino = archive_entry_ino64(entry);

	/*
	 * First look in the list of multiply-linked files.  If we've
	 * already dumped it, convert this entry to a hard link entry.
	 */
	for (le = cpio->links_head; le; le = le->next) {
		if (le->dev == dev && le->ino == ino) {
			archive_entry_copy_hardlink(entry, le->name);

			if (--le->links <= 0) {
				if (le->previous != NULL)
					le->previous->next = le->next;
				if (le->next != NULL)
					le->next->previous = le->previous;
				if (cpio->links_head == le)
					cpio->links_head = le->next;
				free(le->name);
				free(le);
			}

			return (ARCHIVE_OK);
		}
	}

	le = (struct links_entry *)malloc(sizeof(struct links_entry));
	if (le == NULL) {
		archive_set_error(&a->archive,
		    ENOMEM, "Out of memory adding file to list");
		return (ARCHIVE_FATAL);
	}
	if (cpio->links_head != NULL)
		cpio->links_head->previous = le;
	le->next = cpio->links_head;
	le->previous = NULL;
	cpio->links_head = le;
	le->dev = dev;
	le->ino = ino;
	le->links = archive_entry_nlink(entry) - 1;
	le->name = strdup(archive_entry_pathname(entry));
	if (le->name == NULL) {
		archive_set_error(&a->archive,
		    ENOMEM, "Out of memory adding file to list");
		return (ARCHIVE_FATAL);
	}

	return (ARCHIVE_OK);
}
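The cpio reader above keeps its hard-link bookkeeping in a plain doubly linked list (headed by a links_head pointer assumed to live in struct cpio) rather than a hash table, so its links_entry differs from the resolver's. A plausible shape, again inferred from the accesses in record_hardlink() rather than taken from the real header:

/* Hypothetical layout inferred from record_hardlink() above. */
struct links_entry {
	struct links_entry	*next;
	struct links_entry	*previous;
	int			 links;	/* Remaining occurrences to convert to hardlinks. */
	dev_t			 dev;
	int64_t			 ino;
	char			*name;	/* Pathname of the first occurrence. */
};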
Example #4
/*
 * Write the appropriate header.
 */
static int
_archive_write_header(struct archive *_a, struct archive_entry *entry)
{
	struct archive_write *a = (struct archive_write *)_a;
	int ret, r2;

	__archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
	    ARCHIVE_STATE_DATA | ARCHIVE_STATE_HEADER, "archive_write_header");
	archive_clear_error(&a->archive);

	/* In particular, "retry" and "fatal" get returned immediately. */
	ret = archive_write_finish_entry(&a->archive);
	if (ret < ARCHIVE_OK && ret != ARCHIVE_WARN)
		return (ret);

	if (a->skip_file_dev != 0 &&
	    archive_entry_dev(entry) == a->skip_file_dev &&
	    a->skip_file_ino != 0 &&
	    archive_entry_ino64(entry) == a->skip_file_ino) {
		archive_set_error(&a->archive, 0,
		    "Can't add archive to itself");
		return (ARCHIVE_FAILED);
	}

	/* Format and write header. */
	r2 = ((a->format_write_header)(a, entry));
	if (r2 < ret)
		ret = r2;

	a->archive.state = ARCHIVE_STATE_DATA;
	return (ret);
}
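_archive_write_header() is the internal worker behind the public archive_write_header() entry point. The following is a minimal usage sketch of how a caller reaches it through the documented write API; the filename and data are illustrative and error handling is abbreviated.

#include <archive.h>
#include <archive_entry.h>
#include <string.h>

/* Write one small regular file into "out.tar" (illustrative). */
static int
write_one_file(void)
{
	struct archive *a = archive_write_new();
	struct archive_entry *entry = archive_entry_new();
	const char data[] = "hello\n";

	archive_write_set_format_pax_restricted(a);
	archive_write_open_filename(a, "out.tar");

	archive_entry_set_pathname(entry, "hello.txt");
	archive_entry_set_filetype(entry, AE_IFREG);
	archive_entry_set_perm(entry, 0644);
	archive_entry_set_size(entry, (int64_t)strlen(data));

	/* Dispatches to _archive_write_header() via a->format_write_header. */
	if (archive_write_header(a, entry) < ARCHIVE_OK)
		return (-1);
	archive_write_data(a, data, strlen(data));

	archive_entry_free(entry);
	archive_write_close(a);
	archive_write_free(a);
	return (0);
}

The "Can't add archive to itself" guard fires when the entry's dev/ino pair matches values the caller registered beforehand; in current libarchive that registration is done with archive_write_set_skip_file().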
Example #5
static int ar_entry_dev(lua_State *L) {
    struct archive_entry* self = *ar_entry_check(L, 1);
    int is_set;
    if ( NULL == self ) return 0;

    is_set = ( lua_gettop(L) == 2 );
    lua_pushnumber(L, archive_entry_dev(self));
    if ( is_set ) {
        archive_entry_set_dev(self, lua_tonumber(L, 2));
    }
    return 1;
}
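The binding doubles as getter and setter: it always pushes the current device number, and if a second argument is present it then stores that value with archive_entry_set_dev(). A hedged sketch of how such a function is typically exposed to Lua; the table name and registration notes are illustrative, not taken from the original module.

#include <lua.h>
#include <lauxlib.h>

/* Hypothetical method table; in Lua 5.1 it would be installed on the entry
 * metatable with luaL_register(), in Lua 5.2+ with luaL_setfuncs(). */
static const luaL_Reg ar_entry_methods[] = {
	{ "dev",  ar_entry_dev },
	{ NULL,   NULL }
};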
static void
record_hardlink(struct cpio *cpio, struct archive_entry *entry)
{
	struct links_entry	*le;
	dev_t dev;
	ino_t ino;

	dev = archive_entry_dev(entry);
	ino = archive_entry_ino(entry);

	/*
	 * First look in the list of multiply-linked files.  If we've
	 * already dumped it, convert this entry to a hard link entry.
	 */
	for (le = cpio->links_head; le; le = le->next) {
		if (le->dev == dev && le->ino == ino) {
			archive_entry_set_hardlink(entry, le->name);

			if (--le->links <= 0) {
				if (le->previous != NULL)
					le->previous->next = le->next;
				if (le->next != NULL)
					le->next->previous = le->previous;
				if (cpio->links_head == le)
					cpio->links_head = le->next;
				free(le);
			}

			return;
		}
	}

	le = (struct links_entry *)malloc(sizeof(struct links_entry));
	if (le == NULL)
		__archive_errx(1, "Out of memory adding file to list");
	if (cpio->links_head != NULL)
		cpio->links_head->previous = le;
	le->next = cpio->links_head;
	le->previous = NULL;
	cpio->links_head = le;
	le->dev = dev;
	le->ino = ino;
	le->links = archive_entry_nlink(entry) - 1;
	le->name = strdup(archive_entry_pathname(entry));
	if (le->name == NULL)
		__archive_errx(1, "Out of memory adding file to list");
}
static int
archive_write_cpio_header(struct archive_write *a, struct archive_entry *entry)
{
	struct cpio *cpio;
	const char *p, *path;
	int pathlength, ret;
	struct cpio_header	 h;

	cpio = (struct cpio *)a->format_data;
	ret = 0;

	path = archive_entry_pathname(entry);
	pathlength = strlen(path) + 1; /* Include trailing null. */

	memset(&h, 0, sizeof(h));
	format_octal(070707, &h.c_magic, sizeof(h.c_magic));
	format_octal(archive_entry_dev(entry), &h.c_dev, sizeof(h.c_dev));
	/*
	 * TODO: Generate artificial inode numbers rather than just
	 * re-using the ones off the disk.  That way, the 18-bit c_ino
	 * field only limits the number of files in the archive.
	 */
	if (archive_entry_ino(entry) > 0777777) {
		archive_set_error(&a->archive, ERANGE, "large inode number truncated");
		ret = ARCHIVE_WARN;
	}

	format_octal(archive_entry_ino(entry) & 0777777, &h.c_ino, sizeof(h.c_ino));
	format_octal(archive_entry_mode(entry), &h.c_mode, sizeof(h.c_mode));
	format_octal(archive_entry_uid(entry), &h.c_uid, sizeof(h.c_uid));
	format_octal(archive_entry_gid(entry), &h.c_gid, sizeof(h.c_gid));
	format_octal(archive_entry_nlink(entry), &h.c_nlink, sizeof(h.c_nlink));
	if (archive_entry_filetype(entry) == AE_IFBLK
	    || archive_entry_filetype(entry) == AE_IFCHR)
	    format_octal(archive_entry_dev(entry), &h.c_rdev, sizeof(h.c_rdev));
	else
	    format_octal(0, &h.c_rdev, sizeof(h.c_rdev));
	format_octal(archive_entry_mtime(entry), &h.c_mtime, sizeof(h.c_mtime));
	format_octal(pathlength, &h.c_namesize, sizeof(h.c_namesize));

	/* Non-regular files don't store bodies. */
	if (archive_entry_filetype(entry) != AE_IFREG)
		archive_entry_set_size(entry, 0);

	/* Symlinks get the link written as the body of the entry. */
	p = archive_entry_symlink(entry);
	if (p != NULL  &&  *p != '\0')
		format_octal(strlen(p), &h.c_filesize, sizeof(h.c_filesize));
	else
		format_octal(archive_entry_size(entry),
		    &h.c_filesize, sizeof(h.c_filesize));

	ret = (a->compressor.write)(a, &h, sizeof(h));
	if (ret != ARCHIVE_OK)
		return (ARCHIVE_FATAL);

	ret = (a->compressor.write)(a, path, pathlength);
	if (ret != ARCHIVE_OK)
		return (ARCHIVE_FATAL);

	cpio->entry_bytes_remaining = archive_entry_size(entry);

	/* Write the symlink now. */
	if (p != NULL  &&  *p != '\0')
		ret = (a->compressor.write)(a, p, strlen(p));

	return (ret);
}
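format_octal() is not part of this excerpt. Judging from its call sites (value, destination field, field width), it presumably renders the value as fixed-width, zero-padded octal ASCII with no terminating NUL, which is what the odc ("070707") cpio header stores. A minimal sketch under that assumption:

#include <stdint.h>

/* Assumed behaviour of format_octal(): write 'v' into the 's'-byte field at
 * 'p' as zero-padded octal ASCII; return -1 if the value does not fit. */
static int
format_octal(int64_t v, void *p, int s)
{
	char *dest = (char *)p;
	int ret = 0;

	if (v < 0) {			/* Negative values are not representable. */
		v = 0;
		ret = -1;
	}
	while (s-- > 0) {		/* Fill the field from its last digit. */
		dest[s] = (char)('0' + (v & 7));
		v >>= 3;
	}
	if (v != 0)			/* Leftover bits: value was too large. */
		ret = -1;
	return (ret);
}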
Example #8
const struct stat *
archive_entry_stat(struct archive_entry *entry)
{
	struct stat *st;
	if (entry->stat == NULL) {
		entry->stat = calloc(1, sizeof(*st));
		if (entry->stat == NULL)
			return (NULL);
		entry->stat_valid = 0;
	}

	/*
	 * If none of the underlying fields have been changed, we
	 * don't need to regenerate.  In theory, we could use a bitmap
	 * here to flag only those items that have changed, but the
	 * extra complexity probably isn't worth it.  It will be very
	 * rare for anyone to change just one field then request a new
	 * stat structure.
	 */
	if (entry->stat_valid)
		return (entry->stat);

	st = entry->stat;
	/*
	 * Use the public interfaces to extract items, so that
	 * the appropriate conversions get invoked.
	 */
	st->st_atime = archive_entry_atime(entry);
#if HAVE_STRUCT_STAT_ST_BIRTHTIME
	st->st_birthtime = archive_entry_birthtime(entry);
#endif
	st->st_ctime = archive_entry_ctime(entry);
	st->st_mtime = archive_entry_mtime(entry);
	st->st_dev = archive_entry_dev(entry);
	st->st_gid = archive_entry_gid(entry);
	st->st_uid = archive_entry_uid(entry);
	st->st_ino = archive_entry_ino64(entry);
	st->st_nlink = archive_entry_nlink(entry);
	st->st_rdev = archive_entry_rdev(entry);
	st->st_size = archive_entry_size(entry);
	st->st_mode = archive_entry_mode(entry);

	/*
	 * On systems that support high-res timestamps, copy that
	 * information into struct stat.
	 */
#if HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC
	st->st_atimespec.tv_nsec = archive_entry_atime_nsec(entry);
	st->st_ctimespec.tv_nsec = archive_entry_ctime_nsec(entry);
	st->st_mtimespec.tv_nsec = archive_entry_mtime_nsec(entry);
#elif HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC
	st->st_atim.tv_nsec = archive_entry_atime_nsec(entry);
	st->st_ctim.tv_nsec = archive_entry_ctime_nsec(entry);
	st->st_mtim.tv_nsec = archive_entry_mtime_nsec(entry);
#elif HAVE_STRUCT_STAT_ST_MTIME_N
	st->st_atime_n = archive_entry_atime_nsec(entry);
	st->st_ctime_n = archive_entry_ctime_nsec(entry);
	st->st_mtime_n = archive_entry_mtime_nsec(entry);
#elif HAVE_STRUCT_STAT_ST_UMTIME
	st->st_uatime = archive_entry_atime_nsec(entry) / 1000;
	st->st_uctime = archive_entry_ctime_nsec(entry) / 1000;
	st->st_umtime = archive_entry_mtime_nsec(entry) / 1000;
#elif HAVE_STRUCT_STAT_ST_MTIME_USEC
	st->st_atime_usec = archive_entry_atime_nsec(entry) / 1000;
	st->st_ctime_usec = archive_entry_ctime_nsec(entry) / 1000;
	st->st_mtime_usec = archive_entry_mtime_nsec(entry) / 1000;
#endif
#if HAVE_STRUCT_STAT_ST_BIRTHTIMESPEC_TV_NSEC
	st->st_birthtimespec.tv_nsec = archive_entry_birthtime_nsec(entry);
#endif

	/*
	 * TODO: On Linux, store 32 or 64 here depending on whether
	 * the cached stat structure is a stat32 or a stat64.  This
	 * will allow us to support both variants interchangeably.
	 */
	entry->stat_valid = 1;

	return (st);
}
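A short usage sketch for the accessor above: once archive_read_next_header() hands back an entry, archive_entry_stat() packages the entry's fields into a conventional struct stat. The archive name below is illustrative and error handling is abbreviated.

#include <archive.h>
#include <archive_entry.h>
#include <stdio.h>
#include <sys/stat.h>

/* List size and mode of every entry in "in.tar" (illustrative). */
static void
list_archive(void)
{
	struct archive *a = archive_read_new();
	struct archive_entry *entry;

	archive_read_support_format_all(a);
	archive_read_open_filename(a, "in.tar", 10240);

	while (archive_read_next_header(a, &entry) == ARCHIVE_OK) {
		const struct stat *st = archive_entry_stat(entry);
		if (st != NULL)
			printf("%s: %lld bytes, mode %o\n",
			    archive_entry_pathname(entry),
			    (long long)st->st_size,
			    (unsigned int)st->st_mode);
		archive_read_data_skip(a);
	}
	archive_read_free(a);
}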
static struct links_entry *
find_entry(struct archive_entry_linkresolver *res,
    struct archive_entry *entry)
{
	struct links_entry	*le;
	int			 hash, bucket;
	dev_t			 dev;
#ifndef __minix
	int64_t			 ino;
#else
	int32_t			ino;
#endif

	/* Free a held entry. */
	if (res->spare != NULL) {
		archive_entry_free(res->spare->canonical);
		archive_entry_free(res->spare->entry);
		free(res->spare);
		res->spare = NULL;
	}

	/* If the links cache overflowed and got flushed, don't bother. */
	if (res->buckets == NULL)
		return (NULL);

	dev = archive_entry_dev(entry);
#ifndef __minix
	ino = archive_entry_ino64(entry);
#else
	ino = archive_entry_ino(entry);
#endif
	hash = (int)(dev ^ ino);

	/* Try to locate this entry in the links cache. */
	bucket = hash % res->number_buckets;
	for (le = res->buckets[bucket]; le != NULL; le = le->next) {
#ifndef __minix
		if (le->hash == hash
		    && dev == archive_entry_dev(le->canonical)
		    && ino == archive_entry_ino64(le->canonical)) {
#else
		if (le->hash == hash
		    && dev == archive_entry_dev(le->canonical)
		    && ino == archive_entry_ino(le->canonical)) {
#endif
			/*
			 * Decrement link count each time and release
			 * the entry if it hits zero.  This saves
			 * memory and is necessary for detecting
			 * missed links.
			 */
			--le->links;
			if (le->links > 0)
				return (le);
			/* Remove it from this hash bucket. */
			if (le->previous != NULL)
				le->previous->next = le->next;
			if (le->next != NULL)
				le->next->previous = le->previous;
			if (res->buckets[bucket] == le)
				res->buckets[bucket] = le->next;
			res->number_entries--;
			/* Defer freeing this entry. */
			res->spare = le;
			return (le);
		}
	}
	return (NULL);
}


static struct links_entry *
next_entry(struct archive_entry_linkresolver *res)
{
	struct links_entry	*le;
	size_t			 bucket;

	/* Free a held entry. */
	if (res->spare != NULL) {
		archive_entry_free(res->spare->canonical);
		free(res->spare);
		res->spare = NULL;
	}

	/* If the links cache overflowed and got flushed, don't bother. */
	if (res->buckets == NULL)
		return (NULL);

	/* Look for next non-empty bucket in the links cache. */
	for (bucket = 0; bucket < res->number_buckets; bucket++) {
		le = res->buckets[bucket];
		if (le != NULL) {
			/* Remove it from this hash bucket. */
			if (le->next != NULL)
				le->next->previous = le->previous;
			res->buckets[bucket] = le->next;
			res->number_entries--;
			/* Defer freeing this entry. */
			res->spare = le;
			return (le);
		}
	}
	return (NULL);
}

static struct links_entry *
insert_entry(struct archive_entry_linkresolver *res,
    struct archive_entry *entry)
{
	struct links_entry *le;
	int			 hash, bucket;

	/* Add this entry to the links cache. */
	le = malloc(sizeof(struct links_entry));
	if (le == NULL)
		return (NULL);
	memset(le, 0, sizeof(*le));
	le->canonical = archive_entry_clone(entry);

	/* If the links cache is getting too full, enlarge the hash table. */
	if (res->number_entries > res->number_buckets * 2)
		grow_hash(res);

#ifndef __minix
	hash = archive_entry_dev(entry) ^ archive_entry_ino64(entry);
#else
	hash = ((int)archive_entry_dev(entry)) ^ ((int)archive_entry_ino(entry));
#endif
	bucket = hash % res->number_buckets;

	/* If we could allocate the entry, record it. */
	if (res->buckets[bucket] != NULL)
		res->buckets[bucket]->previous = le;
	res->number_entries++;
	le->next = res->buckets[bucket];
	le->previous = NULL;
	res->buckets[bucket] = le;
	le->hash = hash;
	le->links = archive_entry_nlink(entry) - 1;
	return (le);
}

static void
grow_hash(struct archive_entry_linkresolver *res)
{
	struct links_entry *le, **new_buckets;
	size_t new_size;
	size_t i, bucket;

	/* Try to enlarge the bucket list. */
	new_size = res->number_buckets * 2;
	new_buckets = malloc(new_size * sizeof(struct links_entry *));

	if (new_buckets != NULL) {
		memset(new_buckets, 0,
		    new_size * sizeof(struct links_entry *));
		for (i = 0; i < res->number_buckets; i++) {
			while (res->buckets[i] != NULL) {
				/* Remove entry from old bucket. */
				le = res->buckets[i];
				res->buckets[i] = le->next;

				/* Add entry to new bucket. */
				bucket = le->hash % new_size;

				if (new_buckets[bucket] != NULL)
					new_buckets[bucket]->previous =
					    le;
				le->next = new_buckets[bucket];
				le->previous = NULL;
				new_buckets[bucket] = le;
			}
		}
		free(res->buckets);
		res->buckets = new_buckets;
		res->number_buckets = new_size;
	}
}
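One detail worth noting across the two variants in this listing: the first find_entry()/insert_entry() pair indexes buckets with hash & (number_buckets - 1), which is only valid while the bucket count is a power of two, whereas the __minix variant uses hash % number_buckets on an int hash (where dev ^ ino can in principle come out negative and so can the remainder). Since grow_hash() always doubles the table, the masking form stays valid as long as the initial size is a power of two, and for non-negative hashes the two expressions pick the same bucket:

#include <assert.h>
#include <stddef.h>

/* For a power-of-two bucket count, masking and modulo pick the same bucket
 * (for non-negative hash values). */
static void
bucket_index_demo(void)
{
	size_t hash = 0x1234;
	size_t nbuckets = 16;	/* power of two */

	assert((hash & (nbuckets - 1)) == hash % nbuckets);	/* both are 4 */
}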
Example #10
unsigned long Entry::dev()
{
    return archive_entry_dev(_entry);
}
const char *
archive_entry_linkresolve(struct archive_entry_linkresolver *links_cache,
    struct archive_entry *entry)
{
	struct links_entry	*le, **new_buckets;
	int			 hash;
	size_t			 i, new_size;
	dev_t			 dev;
	ino_t			 ino;
	int			 nlinks;


	/* Free a held name. */
	free(links_cache->last_name);
	links_cache->last_name = NULL;

	/* If the links cache overflowed and got flushed, don't bother. */
	if (links_cache->buckets == NULL)
		return (NULL);

	dev = archive_entry_dev(entry);
	ino = archive_entry_ino(entry);
	nlinks = archive_entry_nlink(entry);

	/* An entry with one link can't be a hard link. */
	if (nlinks == 1)
		return (NULL);

	/* If the links cache is getting too full, enlarge the hash table. */
	if (links_cache->number_entries > links_cache->number_buckets * 2)
	{
		/* Try to enlarge the bucket list. */
		new_size = links_cache->number_buckets * 2;
		new_buckets = malloc(new_size * sizeof(struct links_entry *));

		if (new_buckets != NULL) {
			memset(new_buckets, 0,
			    new_size * sizeof(struct links_entry *));
			for (i = 0; i < links_cache->number_buckets; i++) {
				while (links_cache->buckets[i] != NULL) {
					/* Remove entry from old bucket. */
					le = links_cache->buckets[i];
					links_cache->buckets[i] = le->next;

					/* Add entry to new bucket. */
					hash = (le->dev ^ le->ino) % new_size;

					if (new_buckets[hash] != NULL)
						new_buckets[hash]->previous =
						    le;
					le->next = new_buckets[hash];
					le->previous = NULL;
					new_buckets[hash] = le;
				}
			}
			free(links_cache->buckets);
			links_cache->buckets = new_buckets;
			links_cache->number_buckets = new_size;
		}
	}

	/* Try to locate this entry in the links cache. */
	hash = ( dev ^ ino ) % links_cache->number_buckets;
	for (le = links_cache->buckets[hash]; le != NULL; le = le->next) {
		if (le->dev == dev && le->ino == ino) {
			/*
			 * Decrement link count each time and release
			 * the entry if it hits zero.  This saves
			 * memory and is necessary for detecting
			 * missed links.
			 */
			--le->links;
			if (le->links > 0)
				return (le->name);
			/*
			 * When we release the entry, save the name
			 * until the next call.
			 */
			links_cache->last_name = le->name;
			/*
			 * Release the entry.
			 */
			if (le->previous != NULL)
				le->previous->next = le->next;
			if (le->next != NULL)
				le->next->previous = le->previous;
			if (links_cache->buckets[hash] == le)
				links_cache->buckets[hash] = le->next;
			links_cache->number_entries--;
			free(le);
			return (links_cache->last_name);
		}
	}

	/* Add this entry to the links cache. */
	le = malloc(sizeof(struct links_entry));
	if (le == NULL)
		return (NULL);
	le->name = strdup(archive_entry_pathname(entry));
	if (le->name == NULL) {
		free(le);
		return (NULL);
	}

	/* If we could allocate the entry, record it. */
	if (links_cache->buckets[hash] != NULL)
		links_cache->buckets[hash]->previous = le;
	links_cache->number_entries++;
	le->next = links_cache->buckets[hash];
	le->previous = NULL;
	links_cache->buckets[hash] = le;
	le->dev = dev;
	le->ino = ino;
	le->links = nlinks - 1;
	return (NULL);
}
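This archive_entry_linkresolve() is an older interface that hands back the cached pathname directly; current libarchive wraps the same bookkeeping in archive_entry_linkresolver_new() / archive_entry_linkify() / archive_entry_linkresolver_free(). A hedged sketch of how a writer loop typically consults the resolver; the helper name is illustrative.

#include <archive.h>
#include <archive_entry.h>

/* Let the resolver rewrite later occurrences of a multiply-linked file as
 * hardlink entries before they are written (illustrative helper). */
static void
resolve_and_write(struct archive *a,
    struct archive_entry_linkresolver *res, struct archive_entry *entry)
{
	struct archive_entry *sparse;

	/* May set the hardlink field on 'entry' and/or hand back a deferred
	 * entry in 'sparse', depending on the format strategy. */
	archive_entry_linkify(res, &entry, &sparse);
	if (entry != NULL)
		archive_write_header(a, entry);
	if (sparse != NULL)
		archive_write_header(a, sparse);
}

The resolver itself would be created once with archive_entry_linkresolver_new(), given a format strategy with archive_entry_linkresolver_set_strategy(res, archive_format(a)), and released with archive_entry_linkresolver_free(res).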