/*
 * Write data to the compressed stream.
 *
 * Returns ARCHIVE_OK if all data written, error otherwise.
 */
static int
archive_compressor_bzip2_write(struct archive_write *a, const void *buff,
    size_t length)
{
	struct private_data *state;

	state = (struct private_data *)a->compressor.data;
	if (a->client_writer == NULL) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "No write callback is registered?  "
		    "This is probably an internal programming error.");
		return (ARCHIVE_FATAL);
	}

	/* Update statistics */
	state->total_in += length;

	/* Compress input data to output buffer */
	SET_NEXT_IN(state, buff);
	state->stream.avail_in = length;
	if (drive_compressor(a, state, 0))
		return (ARCHIVE_FATAL);
	a->archive.file_position += length;
	return (ARCHIVE_OK);
}
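
/*
 * NOTE (illustrative sketch, not part of the original file):
 * drive_compressor() is referenced above but not shown in this section.
 * The sketch below shows the kind of loop such a helper is assumed to
 * implement: feed the pending input to BZ2_bzCompress() and, whenever
 * the output buffer fills, flush it through the client writer.  The
 * field names (state->compressed, state->compressed_buffer_size) are
 * assumptions taken from the surrounding code, not a definitive
 * implementation; short writes are ignored here for brevity.
 */
#if 0
static int
drive_compressor_sketch(struct archive_write *a, struct private_data *state,
    int finishing)
{
	ssize_t bytes_written;
	int ret;

	for (;;) {
		/* Output buffer full: flush it to the client. */
		if (state->stream.avail_out == 0) {
			bytes_written = (a->client_writer)(&a->archive,
			    a->client_data, state->compressed,
			    state->compressed_buffer_size);
			if (bytes_written <= 0)
				return (ARCHIVE_FATAL);
			a->archive.raw_position += bytes_written;
			state->stream.next_out = state->compressed;
			state->stream.avail_out = state->compressed_buffer_size;
		}

		/* Nothing left to feed and not finishing: done for now. */
		if (!finishing && state->stream.avail_in == 0)
			return (ARCHIVE_OK);

		ret = BZ2_bzCompress(&(state->stream),
		    finishing ? BZ_FINISH : BZ_RUN);
		switch (ret) {
		case BZ_RUN_OK:
			/* Did the compressor consume all pending input? */
			if (!finishing && state->stream.avail_in == 0)
				return (ARCHIVE_OK);
			break;
		case BZ_FINISH_OK:	/* Finishing: more work to do. */
			break;
		case BZ_STREAM_END:	/* Finishing: all done; tail stays
					 * in the buffer for _finish(). */
			return (ARCHIVE_OK);
		default:
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_PROGRAMMER,
			    "BZ2_bzCompress() returned status %d", ret);
			return (ARCHIVE_FATAL);
		}
	}
}
#endif
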
/*
 * Write data to the compressed stream (write-filter variant).
 *
 * Returns ARCHIVE_OK if all data written, error otherwise.
 */
static int
archive_compressor_bzip2_write(struct archive_write_filter *f,
    const void *buff, size_t length)
{
	struct private_data *data = (struct private_data *)f->data;

	/* Update statistics */
	data->total_in += length;

	/* Compress input data to output buffer */
	SET_NEXT_IN(data, buff);
	data->stream.avail_in = length;
	if (drive_compressor(f, data, 0))
		return (ARCHIVE_FATAL);
	return (ARCHIVE_OK);
}
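
/*
 * Illustrative usage sketch (assumption, not part of this file): a
 * client never calls these callbacks directly; they are reached through
 * the public libarchive write API, roughly as below.  This assumes the
 * libarchive 3.x API, where archive_write_add_filter_bzip2()
 * corresponds to the write-filter variant above; the older API used
 * archive_write_set_compression_bzip2().
 */
#if 0
static int
write_tar_bz2_sketch(const char *path)
{
	struct archive *a = archive_write_new();

	archive_write_add_filter_bzip2(a);
	archive_write_set_format_pax_restricted(a);
	if (archive_write_open_filename(a, path) != ARCHIVE_OK) {
		archive_write_free(a);
		return (ARCHIVE_FATAL);
	}
	/* ... archive_write_header() / archive_write_data() per entry ... */
	archive_write_close(a);
	archive_write_free(a);
	return (ARCHIVE_OK);
}
#endif
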
/*
 * Finish the compression.
 */
static int
archive_compressor_bzip2_finish(struct archive_write *a)
{
	ssize_t block_length;
	int ret;
	struct private_data *state;
	ssize_t target_block_length;
	ssize_t bytes_written;
	unsigned tocopy;

	ret = ARCHIVE_OK;
	state = (struct private_data *)a->compressor.data;
	if (state != NULL) {
		if (a->client_writer == NULL) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_PROGRAMMER,
			    "No write callback is registered?\n"
			    "This is probably an internal programming error.");
			ret = ARCHIVE_FATAL;
			goto cleanup;
		}

		/* By default, always pad the uncompressed data. */
		if (a->pad_uncompressed) {
			tocopy = a->bytes_per_block -
			    (state->total_in % a->bytes_per_block);
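			/*
			 * Worked example (illustrative numbers): with
			 * bytes_per_block == 10240 and total_in == 25000,
			 * tocopy == 10240 - (25000 % 10240) == 5720, so 5720
			 * null bytes are fed through the compressor in chunks
			 * of at most null_length bytes.  If total_in is
			 * already block-aligned, tocopy == bytes_per_block
			 * and the loop below is skipped entirely.
			 */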
			while (tocopy > 0 && tocopy < (unsigned)a->bytes_per_block) {
				SET_NEXT_IN(state, a->nulls);
				state->stream.avail_in = tocopy < a->null_length ?
				    tocopy : a->null_length;
				state->total_in += state->stream.avail_in;
				tocopy -= state->stream.avail_in;
				ret = drive_compressor(a, state, 0);
				if (ret != ARCHIVE_OK)
					goto cleanup;
			}
		}

		/* Finish compression cycle. */
		if ((ret = drive_compressor(a, state, 1)))
			goto cleanup;

		/* Optionally, pad the final compressed block. */
		block_length = state->stream.next_out - state->compressed;

		/* Tricky calculation to determine size of last block. */
		if (a->bytes_in_last_block <= 0)
			/* Default or Zero: pad to full block */
			target_block_length = a->bytes_per_block;
		else
			/* Round length to next multiple of bytes_in_last_block. */
			target_block_length = a->bytes_in_last_block *
			    ( (block_length + a->bytes_in_last_block - 1) /
				a->bytes_in_last_block);
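		/*
		 * Worked example (illustrative numbers): with
		 * bytes_in_last_block == 512 and block_length == 1300,
		 * target_block_length == 512 * ((1300 + 511) / 512) == 1536,
		 * i.e. the compressed tail is padded up to the next 512-byte
		 * boundary (but never beyond bytes_per_block, see below).
		 */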
		if (target_block_length > a->bytes_per_block)
			target_block_length = a->bytes_per_block;
		if (block_length < target_block_length) {
			memset(state->stream.next_out, 0,
			    target_block_length - block_length);
			block_length = target_block_length;
		}

		/* Write the last block */
		bytes_written = (a->client_writer)(&a->archive, a->client_data,
		    state->compressed, block_length);

		/* TODO: Handle short write of final block. */
		if (bytes_written <= 0)
			ret = ARCHIVE_FATAL;
		else {
			a->archive.raw_position += bytes_written;
			ret = ARCHIVE_OK;
		}

		/* Cleanup: shut down compressor, release memory, etc. */
cleanup:
		switch (BZ2_bzCompressEnd(&(state->stream))) {
		case BZ_OK:
			break;
		default:
			archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
			    "Failed to clean up compressor");
			ret = ARCHIVE_FATAL;
		}

		free(state->compressed);
		free(state);
	}
	/* Free configuration data even if we were never fully initialized. */
	free(a->compressor.config);
	a->compressor.config = NULL;
	return (ret);
}
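
/*
 * Illustrative sketch (assumption, not shown in this section): the
 * matching init step that the cleanup above undoes.  It would allocate
 * the private state and the output buffer freed in _finish() and start
 * the bzip2 stream.  The structure layout, buffer sizing, and the
 * hard-coded compression level are assumptions, not the actual
 * initializer.
 */
#if 0
static int
archive_compressor_bzip2_init_sketch(struct archive_write *a)
{
	struct private_data *state;

	state = (struct private_data *)calloc(1, sizeof(*state));
	if (state == NULL)
		return (ARCHIVE_FATAL);

	/* One output buffer of one block; flushed by drive_compressor(). */
	state->compressed_buffer_size = a->bytes_per_block;
	state->compressed = (char *)malloc(state->compressed_buffer_size);
	if (state->compressed == NULL) {
		free(state);
		return (ARCHIVE_FATAL);
	}
	state->stream.next_out = state->compressed;
	state->stream.avail_out = state->compressed_buffer_size;

	/* blockSize100k = 9, verbosity = 0, workFactor = 0 (default). */
	if (BZ2_bzCompressInit(&(state->stream), 9, 0, 0) != BZ_OK) {
		free(state->compressed);
		free(state);
		return (ARCHIVE_FATAL);
	}
	a->compressor.data = state;
	return (ARCHIVE_OK);
}
#endif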