/*
 * Append the name-hash cache extension: one 32-bit network-order hash
 * value per pack index entry, in index order.
 */
static void write_hash_cache(struct sha1file *f, struct pack_idx_entry **index, uint32_t index_nr)
{
	uint32_t pos;

	for (pos = 0; pos < index_nr; pos++) {
		struct object_entry *oe = (struct object_entry *)index[pos];
		uint32_t be_hash = htonl(oe->hash);

		sha1write(f, &be_hash, sizeof(be_hash));
	}
}
/*
 * Write 'count' bytes to the pack through sha1write() while mirroring
 * them into a single in-memory pack window (the file-scope pack_win),
 * so the just-written data can be read back through the pack-window
 * API (use_pack) before the pack file is finalized.
 *
 * NOTE(review): the repeated "- 20" / "+ 20" adjustments suggest the
 * window's first 20 bytes are reserved (presumably for a hash-sized
 * header carried over between windows) — TODO confirm against the
 * pack window layout before touching this arithmetic.
 */
void fast_import_sha1write(struct sha1file *f, const void *buf, unsigned int count)
{
	if (!pack_win) {
		/* Lazily allocate the mirror window on first write. */
		pack_win = xcalloc(1, sizeof(*pack_data->windows));
		pack_win->offset = 0;
		pack_win->len = 20;
		pack_win->base = xmalloc(packed_git_window_size);
		pack_win->next = NULL;
	}
	/* pack_data is not set the first time sha1write is called */
	if (pack_data && !pack_data->windows) {
		pack_data->windows = pack_win;
		pack_data->pack_size = pack_win->len;
	}

	sha1write(f, buf, count);

	/* Pin the window: mark it as most-recently used and in use so the
	 * window-eviction logic never reclaims it. */
	pack_win->last_used = -1; /* always last used */
	pack_win->inuse_cnt = -1;

	if (pack_data)
		pack_data->pack_size += count;

	if (packed_git_window_size - pack_win->len >= count) {
		/* The new data fits in the current window: append it. */
		memcpy(pack_win->base + pack_win->len - 20, buf, count);
		pack_win->len += count;
	} else {
		/* We're closing the window, so we don't actually need
		 * to copy the beginning of the data, only what will
		 * remain in the new window. */
		/* Advance the window offset by however many whole windows
		 * the write spans, then keep only the tail of 'buf' that
		 * belongs in the new window. */
		pack_win->offset += (((off_t) pack_win->len - 20 + count) / packed_git_window_size) * packed_git_window_size;
		pack_win->len = count % packed_git_window_size - (packed_git_window_size - pack_win->len);
		memcpy(pack_win->base, buf + count - pack_win->len + 20, pack_win->len - 20);
		/* Ensure a pack window on the data before that, otherwise,
		 * use_pack() may try to create a window that overlaps with
		 * this one, and that won't work because it won't be complete. */
		sha1flush(f);
		if (prev_win)
			unuse_pack(&prev_win);
		use_pack(pack_data, &prev_win, pack_win->offset - packed_git_window_size, NULL);
	}
}
void bitmap_writer_finish(struct pack_idx_entry **index, uint32_t index_nr, const char *filename, uint16_t options) { static char tmp_file[PATH_MAX]; static uint16_t default_version = 1; static uint16_t flags = BITMAP_OPT_FULL_DAG; struct sha1file *f; struct bitmap_disk_header header; int fd = odb_mkstemp(tmp_file, sizeof(tmp_file), "pack/tmp_bitmap_XXXXXX"); if (fd < 0) die_errno("unable to create '%s'", tmp_file); f = sha1fd(fd, tmp_file); memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)); header.version = htons(default_version); header.options = htons(flags | options); header.entry_count = htonl(writer.selected_nr); hashcpy(header.checksum, writer.pack_checksum); sha1write(f, &header, sizeof(header)); dump_bitmap(f, writer.commits); dump_bitmap(f, writer.trees); dump_bitmap(f, writer.blobs); dump_bitmap(f, writer.tags); write_selected_commits_v1(f, index, index_nr); if (options & BITMAP_OPT_HASH_CACHE) write_hash_cache(f, index, index_nr); sha1close(f, NULL, CSUM_FSYNC); if (adjust_shared_perm(tmp_file)) die_errno("unable to make temporary bitmap file readable"); if (rename(tmp_file, filename)) die_errno("unable to rename temporary bitmap file to '%s'", filename); }
/*
 * Write one v1 disk entry per selected commit: the commit's position
 * in the sorted pack index (network order), its xor-compression offset
 * and flags, followed by its serialized bitmap.
 *
 * Dies if a selected commit is not present in the pack index — that
 * would mean the bitmap references an object the pack does not contain.
 */
static void write_selected_commits_v1(struct sha1file *f, struct pack_idx_entry **index, uint32_t index_nr)
{
	/* writer.selected_nr is a uint32_t (cf. htonl(writer.selected_nr)
	 * in bitmap_writer_finish); use an unsigned counter to avoid a
	 * signed/unsigned comparison, matching write_hash_cache(). */
	uint32_t i;

	for (i = 0; i < writer.selected_nr; ++i) {
		struct bitmapped_commit *stored = &writer.selected[i];
		struct bitmap_disk_entry on_disk;
		int commit_pos = sha1_pos(stored->commit->object.sha1, index, index_nr, sha1_access);

		if (commit_pos < 0)
			die("BUG: trying to write commit not in index");

		on_disk.object_pos = htonl(commit_pos);
		on_disk.xor_offset = stored->xor_offset;
		on_disk.flags = stored->flags;

		sha1write(f, &on_disk, sizeof(on_disk));
		dump_bitmap(f, stored->write_as);
	}
}
/*
 * Adapter matching the ewah serializer's write-callback signature:
 * forward the buffered bytes to sha1write() and report the byte count.
 * sha1write() dies on error, so this callback never reports failure.
 */
static int sha1write_ewah_helper(void *f, const void *buf, size_t len)
{
	/* sha1write will die on error */
	sha1write(f, buf, len);
	/* Explicit narrowing: the ewah API returns the count as int.
	 * NOTE(review): assumes len <= INT_MAX, which holds for ewah's
	 * bounded serialization chunks — confirm against the ewah code. */
	return (int)len;
}
/*
 * Read the contents from fd for size bytes, streaming it to the
 * packfile in state while updating the hash in ctx. Signal a failure
 * by returning a negative value when the resulting pack would exceed
 * the pack size limit and this is not the first object in the pack,
 * so that the caller can discard what we wrote from the current pack
 * by truncating it and opening a new one. The caller will then call
 * us again after rewinding the input fd.
 *
 * The already_hashed_to pointer is kept untouched by the caller to
 * make sure we do not hash the same byte when we are called
 * again. This way, the caller does not have to checkpoint its hash
 * status before calling us just in case we ask it to call us again
 * with a new pack.
 */
static int stream_to_pack(struct bulk_checkin_state *state, git_SHA_CTX *ctx, off_t *already_hashed_to, int fd, size_t size, enum object_type type, const char *path, unsigned flags)
{
	git_zstream s;
	unsigned char obuf[16384];	/* deflate output buffer, flushed to the pack */
	unsigned hdrlen;
	int status = Z_OK;
	int write_object = (flags & HASH_WRITE_OBJECT);
	off_t offset = 0;		/* total input bytes consumed so far */

	git_deflate_init(&s, pack_compression_level);

	/* The in-pack object header is emitted uncompressed ahead of the
	 * deflated payload, sharing obuf with the compressor output. */
	hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size);
	s.next_out = obuf + hdrlen;
	s.avail_out = sizeof(obuf) - hdrlen;

	while (status != Z_STREAM_END) {
		unsigned char ibuf[16384];	/* raw input staging buffer */

		/* Refill the input buffer when the compressor has drained it
		 * and there is still input left to read. */
		if (size && !s.avail_in) {
			ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
			if (read_in_full(fd, ibuf, rsize) != rsize)
				die("failed to read %d bytes from '%s'", (int)rsize, path);
			offset += rsize;
			/* Hash only bytes not already hashed on a previous
			 * attempt (see the function comment above): clamp the
			 * update to the unhashed suffix of this read. */
			if (*already_hashed_to < offset) {
				size_t hsize = offset - *already_hashed_to;
				if (rsize < hsize)
					hsize = rsize;
				if (hsize)
					git_SHA1_Update(ctx, ibuf, hsize);
				*already_hashed_to = offset;
			}
			s.next_in = ibuf;
			s.avail_in = rsize;
			size -= rsize;
		}

		/* Only request Z_FINISH once all input has been consumed. */
		status = git_deflate(&s, size ? 0 : Z_FINISH);

		/* Flush the output buffer when full, or at end of stream. */
		if (!s.avail_out || status == Z_STREAM_END) {
			if (write_object) {
				size_t written = s.next_out - obuf;

				/* would we bust the size limit? */
				if (state->nr_written && pack_size_limit_cfg && pack_size_limit_cfg < state->offset + written) {
					git_deflate_abort(&s);
					return -1;
				}

				sha1write(state->f, obuf, written);
				state->offset += written;
			}
			s.next_out = obuf;
			s.avail_out = sizeof(obuf);
		}

		switch (status) {
		case Z_OK:
		case Z_BUF_ERROR:	/* harmless: no progress possible yet */
		case Z_STREAM_END:
			continue;
		default:
			die("unexpected deflate failure: %d", status);
		}
	}
	git_deflate_end(&s);
	return 0;
}