Example #1
/*
 * Read the contents from fd for size bytes, streaming it to the
 * packfile in state while updating the hash in ctx. Signal a failure
 * by returning a negative value when the resulting pack would exceed
 * the pack size limit and this is not the first object in the pack,
 * so that the caller can discard what we wrote from the current pack
 * by truncating it and opening a new one. The caller will then call
 * us again after rewinding the input fd.
 *
 * The already_hashed_to pointer is kept untouched by the caller to
 * make sure we do not hash the same byte when we are called
 * again. This way, the caller does not have to checkpoint its hash
 * status before calling us just in case we ask it to call us again
 * with a new pack.
 */
static int stream_to_pack(struct bulk_checkin_state *state,
			  git_SHA_CTX *ctx, off_t *already_hashed_to,
			  int fd, size_t size, enum object_type type,
			  const char *path, unsigned flags)
{
	git_zstream s;
	unsigned char obuf[16384];
	unsigned hdrlen;
	int status = Z_OK;
	int write_object = (flags & HASH_WRITE_OBJECT);
	off_t offset = 0;

	git_deflate_init(&s, pack_compression_level);

	hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size);
	s.next_out = obuf + hdrlen;
	s.avail_out = sizeof(obuf) - hdrlen;

	while (status != Z_STREAM_END) {
		unsigned char ibuf[16384];

		if (size && !s.avail_in) {
			ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
			if (read_in_full(fd, ibuf, rsize) != rsize)
				die("failed to read %d bytes from '%s'",
				    (int)rsize, path);
			offset += rsize;
			if (*already_hashed_to < offset) {
				size_t hsize = offset - *already_hashed_to;
				if (rsize < hsize)
					hsize = rsize;
				if (hsize)
					git_SHA1_Update(ctx, ibuf, hsize);
				*already_hashed_to = offset;
			}
			s.next_in = ibuf;
			s.avail_in = rsize;
			size -= rsize;
		}

		status = git_deflate(&s, size ? 0 : Z_FINISH);

		if (!s.avail_out || status == Z_STREAM_END) {
			if (write_object) {
				size_t written = s.next_out - obuf;

				/* would we bust the size limit? */
				if (state->nr_written &&
				    pack_size_limit_cfg &&
				    pack_size_limit_cfg < state->offset + written) {
					git_deflate_abort(&s);
					return -1;
				}

				sha1write(state->f, obuf, written);
				state->offset += written;
			}
			s.next_out = obuf;
			s.avail_out = sizeof(obuf);
		}

		switch (status) {
		case Z_OK:
		case Z_BUF_ERROR:
		case Z_STREAM_END:
			continue;
		default:
			die("unexpected deflate failure: %d", status);
		}
	}
	git_deflate_end(&s);
	return 0;
}
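The comment above stream_to_pack() describes a retry protocol: on a negative return the caller truncates the overfull pack, opens a new one, rewinds the input fd and calls again, while already_hashed_to guarantees no byte is hashed twice across attempts. The sketch below illustrates such a caller. It is only loosely modelled on git's real caller, deflate_to_pack() in bulk-checkin.c; deflate_to_pack_sketch() and start_new_pack() are hypothetical names, the index-entry and hashfile-checkpoint bookkeeping of the real function is omitted, and the same git-internal headers as the excerpt above are assumed.

/*
 * Simplified illustration of the retry protocol, not git's actual code.
 * start_new_pack() stands in for the real truncate-and-reopen logic.
 */
static int deflate_to_pack_sketch(struct bulk_checkin_state *state,
				  int fd, size_t size, const char *path,
				  unsigned flags)
{
	git_SHA_CTX ctx;
	off_t already_hashed_to = 0;
	off_t seekback = lseek(fd, 0, SEEK_CUR);

	if (seekback == (off_t)-1)
		return error("cannot find the current offset");

	git_SHA1_Init(&ctx);

	while (stream_to_pack(state, &ctx, &already_hashed_to,
			      fd, size, OBJ_BLOB, path, flags) < 0) {
		/*
		 * The current pack would grow past the size limit:
		 * rewind the input and retry against a fresh pack.
		 * already_hashed_to is left alone, so no byte is
		 * hashed a second time on the retry.
		 */
		if (lseek(fd, seekback, SEEK_SET) == (off_t)-1)
			return error("cannot seek back on '%s'", path);
		start_new_pack(state);	/* hypothetical helper */
	}
	return 0;
}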
Example #2
/*
 * From git's archive-zip.c. zip_dir, zip_dir_size, zip_dir_offset,
 * zip_dir_entries, zip_time, zip_date and zip_offset are file-scope
 * statics in that file; big_file_threshold is git's global
 * core.bigFileThreshold setting.
 */
static int write_zip_entry(struct archiver_args *args,
		const unsigned char *sha1,
		const char *path, size_t pathlen,
		unsigned int mode)
{
	struct zip_local_header header;
	struct zip_dir_header dirent;
	struct zip_extra_mtime extra;
	unsigned long attr2 = 0;
	unsigned long compressed_size = 0;
	unsigned long crc = 0;
	unsigned long direntsize = 0;
	int method = 0;
	unsigned char *out = NULL;
	void *deflated = NULL;
	void *buffer = NULL;
	struct git_istream *stream = NULL;
	unsigned long flags = 0;
	unsigned long size = 0;

	crc = crc32(0, NULL, 0);

	if (!has_only_ascii(path)) {
		if (is_utf8(path))
			flags |= ZIP_UTF8;
		else
			warning("Path is not valid UTF-8: %s", path);
	}

	if (pathlen > 0xffff) {
		return error("path too long (%d chars, SHA1: %s): %s",
				(int)pathlen, sha1_to_hex(sha1), path);
	}

	if (S_ISDIR(mode) || S_ISGITLINK(mode)) {
		method = 0;
		attr2 = 16;
		out = NULL;
		size = 0;
		compressed_size = 0;
		buffer = NULL;
	} else if (S_ISREG(mode) || S_ISLNK(mode)) {
		enum object_type type = sha1_object_info(sha1, &size);

		method = 0;
		attr2 = S_ISLNK(mode) ? ((mode | 0777) << 16) :
				(mode & 0111) ? ((mode) << 16) : 0;
		if (S_ISREG(mode) && args->compression_level != 0 && size > 0)
			method = 8;
		if (S_ISREG(mode) && type == OBJ_BLOB && !args->convert &&
				size > big_file_threshold) {
			stream = open_istream(sha1, &type, &size, NULL);
			if (!stream)
				return error("cannot stream blob %s",
						sha1_to_hex(sha1));
			flags |= ZIP_STREAM;
			out = buffer = NULL;
		} else {
			buffer = sha1_file_to_archive(args, path, sha1, mode,
					&type, &size);
			if (!buffer)
				return error("cannot read %s",
						sha1_to_hex(sha1));
			crc = crc32(crc, buffer, size);
			out = buffer;
		}
		compressed_size = (method == 0) ? size : 0;
	} else {
		return error("unsupported file mode: 0%o (SHA1: %s)", mode,
				sha1_to_hex(sha1));
	}

	if (buffer && method == 8) {
		deflated = zlib_deflate(buffer, size, args->compression_level,
				&compressed_size);
		if (deflated && compressed_size - 6 < size) {
			/* ZLIB --> raw compressed data (see RFC 1950) */
			/* CMF and FLG ... */
			out = (unsigned char *)deflated + 2;
			compressed_size -= 6;	/* ... and ADLER32 */
		} else {
			method = 0;
			compressed_size = size;
		}
	}

	copy_le16(extra.magic, 0x5455);
	copy_le16(extra.extra_size, ZIP_EXTRA_MTIME_PAYLOAD_SIZE);
	extra.flags[0] = 1;	/* just mtime */
	copy_le32(extra.mtime, args->time);

	/* make sure we have enough free space in the central directory buffer */
	direntsize = ZIP_DIR_HEADER_SIZE + pathlen + ZIP_EXTRA_MTIME_SIZE;
	while (zip_dir_size < zip_dir_offset + direntsize) {
		zip_dir_size += ZIP_DIRECTORY_MIN_SIZE;
		zip_dir = xrealloc(zip_dir, zip_dir_size);
	}

	copy_le32(dirent.magic, 0x02014b50);
	copy_le16(dirent.creator_version,
			S_ISLNK(mode) || (S_ISREG(mode) && (mode & 0111)) ? 0x0317 : 0);
	copy_le16(dirent.version, 10);
	copy_le16(dirent.flags, flags);
	copy_le16(dirent.compression_method, method);
	copy_le16(dirent.mtime, zip_time);
	copy_le16(dirent.mdate, zip_date);
	set_zip_dir_data_desc(&dirent, size, compressed_size, crc);
	copy_le16(dirent.filename_length, pathlen);
	copy_le16(dirent.extra_length, ZIP_EXTRA_MTIME_SIZE);
	copy_le16(dirent.comment_length, 0);
	copy_le16(dirent.disk, 0);
	copy_le16(dirent.attr1, 0);
	copy_le32(dirent.attr2, attr2);
	copy_le32(dirent.offset, zip_offset);

	copy_le32(header.magic, 0x04034b50);
	copy_le16(header.version, 10);
	copy_le16(header.flags, flags);
	copy_le16(header.compression_method, method);
	copy_le16(header.mtime, zip_time);
	copy_le16(header.mdate, zip_date);
	set_zip_header_data_desc(&header, size, compressed_size, crc);
	copy_le16(header.filename_length, pathlen);
	copy_le16(header.extra_length, ZIP_EXTRA_MTIME_SIZE);
	write_or_die(1, &header, ZIP_LOCAL_HEADER_SIZE);
	zip_offset += ZIP_LOCAL_HEADER_SIZE;
	write_or_die(1, path, pathlen);
	zip_offset += pathlen;
	write_or_die(1, &extra, ZIP_EXTRA_MTIME_SIZE);
	zip_offset += ZIP_EXTRA_MTIME_SIZE;
	if (stream && method == 0) {
		unsigned char buf[STREAM_BUFFER_SIZE];
		ssize_t readlen = 0;

		for (;;) {
			readlen = read_istream(stream, buf, sizeof(buf));
			if (readlen <= 0)
				break;
			crc = crc32(crc, buf, readlen);
			write_or_die(1, buf, readlen);
		}
		close_istream(stream);
		if (readlen)
			return readlen;

		compressed_size = size;
		zip_offset += compressed_size;

		write_zip_data_desc(size, compressed_size, crc);
		zip_offset += ZIP_DATA_DESC_SIZE;

		set_zip_dir_data_desc(&dirent, size, compressed_size, crc);
	} else if (stream && method == 8) {
		unsigned char buf[STREAM_BUFFER_SIZE];
		ssize_t readlen;
		git_zstream zstream;
		int result;
		size_t out_len;
		unsigned char compressed[STREAM_BUFFER_SIZE * 2];

		memset(&zstream, 0, sizeof(zstream));
		git_deflate_init(&zstream, args->compression_level);

		compressed_size = 0;
		zstream.next_out = compressed;
		zstream.avail_out = sizeof(compressed);

		for (;;) {
			readlen = read_istream(stream, buf, sizeof(buf));
			if (readlen <= 0)
				break;
			crc = crc32(crc, buf, readlen);

			zstream.next_in = buf;
			zstream.avail_in = readlen;
			result = git_deflate(&zstream, 0);
			if (result != Z_OK)
				die("deflate error (%d)", result);
			out = compressed;
			if (!compressed_size)
				out += 2;
			out_len = zstream.next_out - out;

			if (out_len > 0) {
				write_or_die(1, out, out_len);
				compressed_size += out_len;
				zstream.next_out = compressed;
				zstream.avail_out = sizeof(compressed);
			}

		}
		close_istream(stream);
		if (readlen)
			return readlen;

		zstream.next_in = buf;
		zstream.avail_in = 0;
		result = git_deflate(&zstream, Z_FINISH);
		if (result != Z_STREAM_END)
			die("deflate error (%d)", result);

		git_deflate_end(&zstream);
		out = compressed;
		if (!compressed_size)
			out += 2;
		out_len = zstream.next_out - out - 4;
		write_or_die(1, out, out_len);
		compressed_size += out_len;
		zip_offset += compressed_size;

		write_zip_data_desc(size, compressed_size, crc);
		zip_offset += ZIP_DATA_DESC_SIZE;

		set_zip_dir_data_desc(&dirent, size, compressed_size, crc);
	} else if (compressed_size > 0) {
		write_or_die(1, out, compressed_size);
		zip_offset += compressed_size;
	}

	free(deflated);
	free(buffer);

	memcpy(zip_dir + zip_dir_offset, &dirent, ZIP_DIR_HEADER_SIZE);
	zip_dir_offset += ZIP_DIR_HEADER_SIZE;
	memcpy(zip_dir + zip_dir_offset, path, pathlen);
	zip_dir_offset += pathlen;
	memcpy(zip_dir + zip_dir_offset, &extra, ZIP_EXTRA_MTIME_SIZE);
	zip_dir_offset += ZIP_EXTRA_MTIME_SIZE;
	zip_dir_entries++;

	return 0;
}
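The "ZLIB --> raw compressed data" step in write_zip_entry() relies on the RFC 1950 framing: a zlib stream is a 2-byte header (CMF and FLG), then the raw deflate data, then a 4-byte Adler-32 trailer, while ZIP entries store raw deflate only. The standalone helper below is an illustration of that stripping, not code from archive-zip.c.

/*
 * Illustration only: given a complete zlib stream, return a pointer to
 * the raw deflate payload inside it and its length. Mirrors the "+ 2"
 * and "- 6" adjustments in write_zip_entry().
 */
static const unsigned char *zlib_to_raw_deflate(const unsigned char *zbuf,
						unsigned long zlen,
						unsigned long *rawlen)
{
	if (zlen < 6)
		return NULL;	/* too short to be a zlib stream */
	*rawlen = zlen - 6;	/* drop 2-byte header and 4-byte Adler-32 */
	return zbuf + 2;	/* skip CMF and FLG */
}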
Example #3
static void start_put(struct transfer_request *request)
{
	char *hex = oid_to_hex(&request->obj->oid);
	struct active_request_slot *slot;
	struct strbuf buf = STRBUF_INIT;
	enum object_type type;
	char hdr[50];
	void *unpacked;
	unsigned long len;
	int hdrlen;
	ssize_t size;
	git_zstream stream;

	unpacked = read_sha1_file(request->obj->oid.hash, &type, &len);
	hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;

	/* Set it up */
	git_deflate_init(&stream, zlib_compression_level);
	size = git_deflate_bound(&stream, len + hdrlen);
	strbuf_init(&request->buffer.buf, size);
	request->buffer.posn = 0;

	/* Compress it */
	stream.next_out = (unsigned char *)request->buffer.buf.buf;
	stream.avail_out = size;

	/* First header.. */
	stream.next_in = (void *)hdr;
	stream.avail_in = hdrlen;
	while (git_deflate(&stream, 0) == Z_OK)
		; /* nothing */

	/* Then the data itself.. */
	stream.next_in = unpacked;
	stream.avail_in = len;
	while (git_deflate(&stream, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&stream);
	free(unpacked);

	request->buffer.buf.len = stream.total_out;

	strbuf_addstr(&buf, "Destination: ");
	append_remote_object_url(&buf, repo->url, hex, 0);
	request->dest = strbuf_detach(&buf, NULL);

	append_remote_object_url(&buf, repo->url, hex, 0);
	strbuf_add(&buf, request->lock->tmpfile_suffix, 41);
	request->url = strbuf_detach(&buf, NULL);

	slot = get_active_slot();
	slot->callback_func = process_response;
	slot->callback_data = request;
	curl_setup_http(slot->curl, request->url, DAV_PUT,
			&request->buffer, fwrite_null);

	if (start_active_slot(slot)) {
		request->slot = slot;
		request->state = RUN_PUT;
	} else {
		request->state = ABORTED;
		FREE_AND_NULL(request->url);
	}
}
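start_put() prepares the PUT body in git's loose-object format: a header of the form "<type> <size>" terminated by a NUL byte, followed by the raw object content, all compressed as a single zlib stream. The sketch below shows the same two-step framing with plain zlib instead of git's git_deflate wrappers; deflate_loose_object() is a hypothetical helper, and the caller is assumed to supply an output buffer of at least deflateBound() bytes for header plus content.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

/*
 * Illustration only. outlen is in/out: on entry the capacity of out,
 * on success the number of compressed bytes written.
 */
static int deflate_loose_object(const char *type_name,
				const unsigned char *content, unsigned long len,
				unsigned char *out, unsigned long *outlen)
{
	char hdr[32];
	int hdrlen = snprintf(hdr, sizeof(hdr), "%s %lu", type_name, len) + 1;
	z_stream s;

	memset(&s, 0, sizeof(s));
	if (deflateInit(&s, Z_DEFAULT_COMPRESSION) != Z_OK)
		return -1;
	s.next_out = out;
	s.avail_out = (uInt)*outlen;

	/* first the header, including its trailing NUL */
	s.next_in = (Bytef *)hdr;
	s.avail_in = hdrlen;
	while (deflate(&s, Z_NO_FLUSH) == Z_OK)
		; /* nothing */

	/* then the content, finishing the stream */
	s.next_in = (Bytef *)content;
	s.avail_in = len;
	while (deflate(&s, Z_FINISH) == Z_OK)
		; /* nothing */

	*outlen = s.total_out;
	return deflateEnd(&s) == Z_OK ? 0 : -1;
}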