/*
 * Append the name-hash cache extension: one network-order uint32 per
 * selected object, emitted in the same order as the pack index entries.
 */
static void write_hash_cache(struct hashfile *f, struct pack_idx_entry **index, uint32_t index_nr)
{
	uint32_t pos;

	for (pos = 0; pos < index_nr; pos++) {
		struct object_entry *obj = (struct object_entry *)index[pos];
		uint32_t hash_be = htonl(obj->hash);

		hashwrite(f, &hash_be, sizeof(hash_be));
	}
}
void bitmap_writer_finish(struct pack_idx_entry **index, uint32_t index_nr, const char *filename, uint16_t options) { static uint16_t default_version = 1; static uint16_t flags = BITMAP_OPT_FULL_DAG; struct strbuf tmp_file = STRBUF_INIT; struct hashfile *f; struct bitmap_disk_header header; int fd = odb_mkstemp(&tmp_file, "pack/tmp_bitmap_XXXXXX"); f = hashfd(fd, tmp_file.buf); memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)); header.version = htons(default_version); header.options = htons(flags | options); header.entry_count = htonl(writer.selected_nr); hashcpy(header.checksum, writer.pack_checksum); hashwrite(f, &header, sizeof(header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz); dump_bitmap(f, writer.commits); dump_bitmap(f, writer.trees); dump_bitmap(f, writer.blobs); dump_bitmap(f, writer.tags); write_selected_commits_v1(f, index, index_nr); if (options & BITMAP_OPT_HASH_CACHE) write_hash_cache(f, index, index_nr); finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); if (adjust_shared_perm(tmp_file.buf)) die_errno("unable to make temporary bitmap file readable"); if (rename(tmp_file.buf, filename)) die_errno("unable to rename temporary bitmap file to '%s'", filename); strbuf_release(&tmp_file); }
/*
 * Read the contents from fd for size bytes, streaming it to the
 * packfile in state while updating the hash in ctx. Signal a failure
 * by returning a negative value when the resulting pack would exceed
 * the pack size limit and this is not the first object in the pack,
 * so that the caller can discard what we wrote from the current pack
 * by truncating it and opening a new one. The caller will then call
 * us again after rewinding the input fd.
 *
 * The already_hashed_to pointer is kept untouched by the caller to
 * make sure we do not hash the same byte when we are called
 * again. This way, the caller does not have to checkpoint its hash
 * status before calling us just in case we ask it to call us again
 * with a new pack.
 */
static int stream_to_pack(struct bulk_checkin_state *state, git_hash_ctx *ctx, off_t *already_hashed_to, int fd, size_t size, enum object_type type, const char *path, unsigned flags)
{
	git_zstream s;
	unsigned char obuf[16384];
	unsigned hdrlen;
	int status = Z_OK;
	int write_object = (flags & HASH_WRITE_OBJECT);
	off_t offset = 0; /* bytes consumed from fd during THIS call */

	git_deflate_init(&s, pack_compression_level);

	/* Pack object header goes at the front of the first output flush. */
	hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size);
	s.next_out = obuf + hdrlen;
	s.avail_out = sizeof(obuf) - hdrlen;

	while (status != Z_STREAM_END) {
		unsigned char ibuf[16384];

		/* Refill the input buffer whenever deflate has drained it. */
		if (size && !s.avail_in) {
			ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
			ssize_t read_result = read_in_full(fd, ibuf, rsize);
			if (read_result < 0)
				die_errno("failed to read from '%s'", path);
			if (read_result != rsize)
				die("failed to read %d bytes from '%s'", (int)rsize, path);
			offset += rsize;
			/*
			 * Only hash bytes we have not hashed on a previous
			 * attempt (see function comment: the caller may
			 * rewind fd and call us again after a -1 return).
			 */
			if (*already_hashed_to < offset) {
				size_t hsize = offset - *already_hashed_to;
				if (rsize < hsize)
					hsize = rsize;
				if (hsize)
					the_hash_algo->update_fn(ctx, ibuf, hsize);
				*already_hashed_to = offset;
			}
			s.next_in = ibuf;
			s.avail_in = rsize;
			size -= rsize;
		}

		/* Ask zlib to finish the stream once all input is consumed. */
		status = git_deflate(&s, size ?
				     0 : Z_FINISH);

		/* Flush the output buffer when full or at end of stream. */
		if (!s.avail_out || status == Z_STREAM_END) {
			if (write_object) {
				size_t written = s.next_out - obuf;

				/* would we bust the size limit? */
				if (state->nr_written &&
				    pack_size_limit_cfg &&
				    pack_size_limit_cfg < state->offset + written) {
					git_deflate_abort(&s);
					return -1;
				}

				hashwrite(state->f, obuf, written);
				state->offset += written;
			}
			s.next_out = obuf;
			s.avail_out = sizeof(obuf);
		}

		switch (status) {
		case Z_OK:
		case Z_BUF_ERROR:
		case Z_STREAM_END:
			continue;
		default:
			die("unexpected deflate failure: %d", status);
		}
	}
	git_deflate_end(&s);
	return 0;
}
/*
 * Adapter matching the ewah serializer's write-callback signature:
 * funnel the bytes into the hashfile. hashwrite() dies on error, so
 * reaching the return means the full `len` bytes were accepted.
 */
static int hashwrite_ewah_helper(void *f, const void *buf, size_t len)
{
	struct hashfile *out = f;

	hashwrite(out, buf, len);
	return len;
}