Example 1
0
/*
 * Finish the compression...
 */
static int
archive_compressor_xz_close(struct archive_write_filter *f)
{
	struct private_data *data = (struct private_data *)f->data;
	int ret, r1;

	ret = drive_compressor(f, data, 1);
	if (ret == ARCHIVE_OK) {
		data->total_out +=
		    data->compressed_buffer_size - data->stream.avail_out;
		ret = __archive_write_filter(f->next_filter,
		    data->compressed,
		    data->compressed_buffer_size - data->stream.avail_out);
		if (f->code == ARCHIVE_FILTER_LZIP && ret == ARCHIVE_OK) {
			archive_le32enc(data->compressed, data->crc32);
			archive_le64enc(data->compressed+4, data->total_in);
			archive_le64enc(data->compressed+12, data->total_out + 20);
			ret = __archive_write_filter(f->next_filter,
			    data->compressed, 20);
		}
	}
	lzma_end(&(data->stream));
	r1 = __archive_write_close_filter(f->next_filter);
	return (r1 < ret ? r1 : ret);
}
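For ARCHIVE_FILTER_LZIP, the close routine above appends the 20-byte lzip member trailer: a little-endian CRC-32 of the uncompressed data, the uncompressed size, and the total member size (total_out plus the 20 trailer bytes themselves). A minimal standalone sketch of that encoding, using hand-rolled little-endian stores in place of libarchive's archive_le32enc()/archive_le64enc() helpers:

#include <stdint.h>

/* Illustrative stand-ins for archive_le32enc()/archive_le64enc(). */
static void le32enc_sketch(unsigned char *p, uint32_t v)
{
	for (int i = 0; i < 4; i++)
		p[i] = (unsigned char)(v >> (8 * i));
}

static void le64enc_sketch(unsigned char *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = (unsigned char)(v >> (8 * i));
}

/* Build the 20-byte lzip trailer: CRC-32, uncompressed size, member size. */
static void build_lzip_trailer(unsigned char trailer[20],
    uint32_t crc32, uint64_t total_in, uint64_t total_out)
{
	le32enc_sketch(trailer, crc32);               /* bytes  0..3  */
	le64enc_sketch(trailer + 4, total_in);        /* bytes  4..11 */
	le64enc_sketch(trailer + 12, total_out + 20); /* bytes 12..19 */
}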
/*
 * Finish the compression.
 */
static int
archive_compressor_bzip2_close(struct archive_write_filter *f)
{
	struct private_data *data = (struct private_data *)f->data;
	int ret, r1;

	/* Finish compression cycle. */
	ret = drive_compressor(f, data, 1);
	if (ret == ARCHIVE_OK) {
		/* Write the last block */
		ret = __archive_write_filter(f->next_filter,
		    data->compressed,
		    data->compressed_buffer_size - data->stream.avail_out);
	}

	switch (BZ2_bzCompressEnd(&(data->stream))) {
	case BZ_OK:
		break;
	default:
		archive_set_error(f->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "Failed to clean up compressor");
		ret = ARCHIVE_FATAL;
	}

	r1 = __archive_write_close_filter(f->next_filter);
	return (r1 < ret ? r1 : ret);
}
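Both close routines end with return (r1 < ret ? r1 : ret). Since libarchive status codes are ordered with ARCHIVE_OK as 0 and increasingly severe problems as more negative values (ARCHIVE_WARN, ARCHIVE_FAILED, ARCHIVE_FATAL), taking the smaller of the two codes propagates whichever outcome is worse. A tiny illustration, assuming the usual archive.h constants:

#include <archive.h>	/* ARCHIVE_OK (0), ARCHIVE_WARN (-20), ARCHIVE_FATAL (-30), ... */

/* Combine two libarchive status codes, keeping the more severe one. */
static int worst_status(int r1, int ret)
{
	return (r1 < ret ? r1 : ret);
}

/* worst_status(ARCHIVE_OK, ARCHIVE_FATAL) == ARCHIVE_FATAL
 * worst_status(ARCHIVE_WARN, ARCHIVE_OK)  == ARCHIVE_WARN  */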
/*
 * Write data to the compressed stream.
 */
static int
archive_compressor_xz_write(struct archive_write *a, const void *buff,
    size_t length)
{
	struct private_data *state;
	int ret;

	state = (struct private_data *)a->compressor.data;
	if (a->client_writer == NULL) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "No write callback is registered?  "
		    "This is probably an internal programming error.");
		return (ARCHIVE_FATAL);
	}

	/* Update statistics */
	state->total_in += length;

	/* Compress input data to output buffer */
	state->stream.next_in = buff;
	state->stream.avail_in = length;
	if ((ret = drive_compressor(a, state, 0)) != ARCHIVE_OK)
		return (ret);

	a->archive.file_position += length;
	return (ARCHIVE_OK);
}
/*
 * Write data to the compressed stream.
 *
 * Returns ARCHIVE_OK if all data written, error otherwise.
 */
static int
archive_compressor_bzip2_write(struct archive_write_filter *f,
    const void *buff, size_t length)
{
	struct private_data *data = (struct private_data *)f->data;

	/* Update statistics */
	data->total_in += length;

	/* Compress input data to output buffer */
	SET_NEXT_IN(data, buff);
	data->stream.avail_in = length;
	if (drive_compressor(f, data, 0))
		return (ARCHIVE_FATAL);
	return (ARCHIVE_OK);
}
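Both write paths load next_in/avail_in and hand control to drive_compressor(), which is not shown in this excerpt. As a rough standalone sketch of the same calling pattern against liblzma (not libarchive's actual helper), the loop keeps calling lzma_code(), flushing the output buffer whenever it fills, until the encoder reports that it is done:

#include <lzma.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal sketch: compress one in-memory buffer to stdout with liblzma.
 * This mirrors the next_in/avail_in bookkeeping above, but it is only an
 * illustration, not libarchive's drive_compressor(). */
static int compress_buffer(const uint8_t *buf, size_t len)
{
	lzma_stream strm = LZMA_STREAM_INIT;
	uint8_t out[8192];

	if (lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64) != LZMA_OK)
		return (-1);

	strm.next_in = buf;
	strm.avail_in = len;
	for (;;) {
		lzma_ret r;

		strm.next_out = out;
		strm.avail_out = sizeof(out);
		/* All input is already provided, so ask for LZMA_FINISH. */
		r = lzma_code(&strm, LZMA_FINISH);
		fwrite(out, 1, sizeof(out) - strm.avail_out, stdout);
		if (r == LZMA_STREAM_END)
			break;
		if (r != LZMA_OK) {
			lzma_end(&strm);
			return (-1);
		}
	}
	lzma_end(&strm);
	return (0);
}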
Example 5
0
/*
 * Write data to the compressed stream.
 */
static int
archive_compressor_xz_write(struct archive_write_filter *f,
    const void *buff, size_t length)
{
	struct private_data *data = (struct private_data *)f->data;
	int ret;

	/* Update statistics */
	data->total_in += length;
	if (f->code == ARCHIVE_FILTER_LZIP)
		data->crc32 = lzma_crc32(buff, length, data->crc32);

	/* Compress input data to output buffer */
	data->stream.next_in = buff;
	data->stream.avail_in = length;
	if ((ret = drive_compressor(f, data, 0)) != ARCHIVE_OK)
		return (ret);

	return (ARCHIVE_OK);
}
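The lzip branch above keeps a running CRC-32 of the uncompressed input with lzma_crc32(), which takes the previous CRC value and returns the updated one, starting from 0. A short sketch of feeding data in chunks, with hypothetical example data:

#include <lzma.h>
#include <stdint.h>
#include <string.h>

/* Accumulate a CRC-32 across several writes, as the lzip path does above. */
static uint32_t crc_of_chunks(void)
{
	const char *chunk1 = "hello, ";	/* hypothetical example data */
	const char *chunk2 = "world";
	uint32_t crc = 0;		/* lzma_crc32() starts from 0 */

	crc = lzma_crc32((const uint8_t *)chunk1, strlen(chunk1), crc);
	crc = lzma_crc32((const uint8_t *)chunk2, strlen(chunk2), crc);
	return crc;			/* same as the CRC-32 of "hello, world" */
}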
/*
 * Finish the compression...
 */
static int
archive_compressor_xz_finish(struct archive_write *a)
{
	ssize_t block_length, target_block_length, bytes_written;
	int ret;
	struct private_data *state;
	unsigned tocopy;

	ret = ARCHIVE_OK;
	state = (struct private_data *)a->compressor.data;
	if (state != NULL) {
		if (a->client_writer == NULL) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_PROGRAMMER,
			    "No write callback is registered?  "
			    "This is probably an internal programming error.");
			ret = ARCHIVE_FATAL;
			goto cleanup;
		}

		/* By default, always pad the uncompressed data. */
		if (a->pad_uncompressed) {
			tocopy = a->bytes_per_block -
			    (state->total_in % a->bytes_per_block);
			while (tocopy > 0 && tocopy < (unsigned)a->bytes_per_block) {
				state->stream.next_in = a->nulls;
				state->stream.avail_in = tocopy < a->null_length ?
				    tocopy : a->null_length;
				state->total_in += state->stream.avail_in;
				tocopy -= state->stream.avail_in;
				ret = drive_compressor(a, state, 0);
				if (ret != ARCHIVE_OK)
					goto cleanup;
			}
		}

		/* Finish compression cycle */
		if (((ret = drive_compressor(a, state, 1))) != ARCHIVE_OK)
			goto cleanup;

		/* Optionally, pad the final compressed block. */
		block_length = state->stream.next_out - state->compressed;

		/* Tricky calculation to determine size of last block. */
		if (a->bytes_in_last_block <= 0)
			/* Default or Zero: pad to full block */
			target_block_length = a->bytes_per_block;
		else
			/* Round length to next multiple of bytes_in_last_block. */
			target_block_length = a->bytes_in_last_block *
			    ( (block_length + a->bytes_in_last_block - 1) /
				a->bytes_in_last_block);
		if (target_block_length > a->bytes_per_block)
			target_block_length = a->bytes_per_block;
		if (block_length < target_block_length) {
			memset(state->stream.next_out, 0,
			    target_block_length - block_length);
			block_length = target_block_length;
		}

		/* Write the last block */
		bytes_written = (a->client_writer)(&a->archive, a->client_data,
		    state->compressed, block_length);
		if (bytes_written <= 0) {
			ret = ARCHIVE_FATAL;
			goto cleanup;
		}
		a->archive.raw_position += bytes_written;

		/* Cleanup: shut down compressor, release memory, etc. */
	cleanup:
		lzma_end(&(state->stream));
		free(state->compressed);
		free(state);
	}
	free(a->compressor.config);
	a->compressor.config = NULL;
	return (ret);
}
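The "tricky calculation" above rounds the final block up to the next multiple of bytes_in_last_block and then caps it at bytes_per_block. A small sketch of that arithmetic with made-up numbers (a 6000-byte final block, bytes_in_last_block of 512, bytes_per_block of 10240):

#include <stdio.h>

/* Round len up to the next multiple of last, then cap at per_block,
 * mirroring the target_block_length calculation above. */
static long target_length(long len, long last, long per_block)
{
	long target;

	if (last <= 0)
		target = per_block;	/* default: pad to a full block */
	else
		target = last * ((len + last - 1) / last);
	if (target > per_block)
		target = per_block;
	return (target);
}

int main(void)
{
	/* 6000 rounded up to a multiple of 512 is 6144, under the 10240 cap. */
	printf("%ld\n", target_length(6000, 512, 10240));
	return (0);
}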
/*
 * Finish the compression.
 */
static int
archive_compressor_bzip2_finish(struct archive_write *a)
{
	ssize_t block_length;
	int ret;
	struct private_data *state;
	ssize_t target_block_length;
	ssize_t bytes_written;
	unsigned tocopy;

	ret = ARCHIVE_OK;
	state = (struct private_data *)a->compressor.data;
	if (state != NULL) {
		if (a->client_writer == NULL) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_PROGRAMMER,
			    "No write callback is registered?\n"
			    "This is probably an internal programming error.");
			ret = ARCHIVE_FATAL;
			goto cleanup;
		}

		/* By default, always pad the uncompressed data. */
		if (a->pad_uncompressed) {
			tocopy = a->bytes_per_block -
			    (state->total_in % a->bytes_per_block);
			while (tocopy > 0 && tocopy < (unsigned)a->bytes_per_block) {
				SET_NEXT_IN(state, a->nulls);
				state->stream.avail_in = tocopy < a->null_length ?
				    tocopy : a->null_length;
				state->total_in += state->stream.avail_in;
				tocopy -= state->stream.avail_in;
				ret = drive_compressor(a, state, 0);
				if (ret != ARCHIVE_OK)
					goto cleanup;
			}
		}

		/* Finish compression cycle. */
		if ((ret = drive_compressor(a, state, 1)))
			goto cleanup;

		/* Optionally, pad the final compressed block. */
		block_length = state->stream.next_out - state->compressed;

		/* Tricky calculation to determine size of last block. */
		if (a->bytes_in_last_block <= 0)
			/* Default or Zero: pad to full block */
			target_block_length = a->bytes_per_block;
		else
			/* Round length to next multiple of bytes_in_last_block. */
			target_block_length = a->bytes_in_last_block *
			    ( (block_length + a->bytes_in_last_block - 1) /
				a->bytes_in_last_block);
		if (target_block_length > a->bytes_per_block)
			target_block_length = a->bytes_per_block;
		if (block_length < target_block_length) {
			memset(state->stream.next_out, 0,
			    target_block_length - block_length);
			block_length = target_block_length;
		}

		/* Write the last block */
		bytes_written = (a->client_writer)(&a->archive, a->client_data,
		    state->compressed, block_length);

		/* TODO: Handle short write of final block. */
		if (bytes_written <= 0)
			ret = ARCHIVE_FATAL;
		else {
			a->archive.raw_position += bytes_written;
			ret = ARCHIVE_OK;
		}

		/* Cleanup: shut down compressor, release memory, etc. */
cleanup:
		switch (BZ2_bzCompressEnd(&(state->stream))) {
		case BZ_OK:
			break;
		default:
			archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
			    "Failed to clean up compressor");
			ret = ARCHIVE_FATAL;
		}

		free(state->compressed);
		free(state);
	}
	/* Free configuration data even if we were never fully initialized. */
	free(a->compressor.config);
	a->compressor.config = NULL;
	return (ret);
}
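For reference, the bzlib calls used above follow the library's standard pattern: BZ2_bzCompress() is driven with BZ_FINISH until it returns BZ_STREAM_END, and BZ2_bzCompressEnd() releases the encoder state (the BZ_OK check the close routines perform). A condensed standalone sketch, not libarchive's drive_compressor():

#include <bzlib.h>
#include <stdio.h>
#include <string.h>

/* Minimal sketch: compress one in-memory buffer to stdout with bzlib. */
static int bz_compress_buffer(char *buf, unsigned int len)
{
	bz_stream strm;
	char out[8192];
	int r;

	memset(&strm, 0, sizeof(strm));
	if (BZ2_bzCompressInit(&strm, 9, 0, 0) != BZ_OK)
		return (-1);

	strm.next_in = buf;
	strm.avail_in = len;
	do {
		strm.next_out = out;
		strm.avail_out = sizeof(out);
		/* BZ_FINISH: no more input; flush and emit the stream trailer. */
		r = BZ2_bzCompress(&strm, BZ_FINISH);
		if (r != BZ_FINISH_OK && r != BZ_STREAM_END) {
			BZ2_bzCompressEnd(&strm);
			return (-1);
		}
		fwrite(out, 1, sizeof(out) - strm.avail_out, stdout);
	} while (r != BZ_STREAM_END);

	/* Same cleanup step the close routines above check for BZ_OK. */
	return (BZ2_bzCompressEnd(&strm) == BZ_OK ? 0 : -1);
}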