/* Checks whether the data object at offset 'p' is linked into the data hash
 * table bucket selected by 'hash'.
 *
 * Returns 1 if the object is found in the bucket's collision chain, 0 if it
 * is not (or the table is empty), or a negative errno-style error on
 * failure to map or traverse the table. */
static int data_object_in_hash_table(JournalFile *f, uint64_t hash, uint64_t p) {
        uint64_t n_buckets, bucket, c;
        int r;

        assert(f);

        /* An empty (or absent) hash table cannot contain anything. */
        n_buckets = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
        if (n_buckets <= 0)
                return 0;

        r = journal_file_map_data_hash_table(f);
        if (r < 0)
                return log_error_errno(r, "Failed to map data hash table: %m");

        /* Walk the collision chain of the bucket this hash falls into. */
        bucket = hash % n_buckets;
        for (c = le64toh(f->data_hash_table[bucket].head_hash_offset); c != 0;) {
                Object *o;

                if (c == p)
                        return 1;

                r = journal_file_move_to_object(f, OBJECT_DATA, c, &o);
                if (r < 0)
                        return r;

                c = le64toh(o->data.next_hash_offset);
        }

        return 0;
}
/* Feeds the immutable portions of one object into the file's running HMAC,
 * as used by FSS sealing.  Only fields that never change after an object is
 * written are hashed, so later mutation of bookkeeping fields (hash chains,
 * entry array links, ...) does not invalidate an existing tag.
 *
 * 'o' may be NULL, in which case the object at offset 'p' is mapped; if 'o'
 * is given, its type is cross-checked against 'type'.
 *
 * Returns 0 on success (also when sealing is disabled for this file),
 * -EBADMSG on a type mismatch, -EINVAL for an unknown object type, or a
 * negative error from the helpers. */
int journal_file_hmac_put_object(JournalFile *f, ObjectType type, Object *o, uint64_t p) {
        int r;

        assert(f);

        /* Nothing to do if this file is not sealed. */
        if (!f->seal)
                return 0;

        r = journal_file_hmac_start(f);
        if (r < 0)
                return r;

        if (!o) {
                /* Caller passed only an offset — map the object ourselves. */
                r = journal_file_move_to_object(f, type, p, &o);
                if (r < 0)
                        return r;
        } else {
                /* Caller passed a mapped object — verify it has the expected type
                 * (OBJECT_UNUSED acts as a wildcard). */
                if (type > OBJECT_UNUSED && o->object.type != type)
                        return -EBADMSG;
        }

        /* The common object header up to (but excluding) the payload is
         * immutable for every object type. */
        gcry_md_write(f->hmac, o, offsetof(ObjectHeader, payload));

        switch (o->object.type) {

        case OBJECT_DATA:
                /* All but hash and payload are mutable */
                gcry_md_write(f->hmac, &o->data.hash, sizeof(o->data.hash));
                gcry_md_write(f->hmac, o->data.payload, le64toh(o->object.size) - offsetof(DataObject, payload));
                break;

        case OBJECT_FIELD:
                /* Same here */
                gcry_md_write(f->hmac, &o->field.hash, sizeof(o->field.hash));
                gcry_md_write(f->hmac, o->field.payload, le64toh(o->object.size) - offsetof(FieldObject, payload));
                break;

        case OBJECT_ENTRY:
                /* All */
                gcry_md_write(f->hmac, &o->entry.seqnum, le64toh(o->object.size) - offsetof(EntryObject, seqnum));
                break;

        case OBJECT_FIELD_HASH_TABLE:
        case OBJECT_DATA_HASH_TABLE:
        case OBJECT_ENTRY_ARRAY:
                /* Nothing: everything is mutable */
                break;

        case OBJECT_TAG:
                /* All but the tag itself */
                gcry_md_write(f->hmac, &o->tag.seqnum, sizeof(o->tag.seqnum));
                gcry_md_write(f->hmac, &o->tag.epoch, sizeof(o->tag.epoch));
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
/* Validates a single ENTRY object at offset 'p': every item it carries must
 * point at a DATA object recorded during the first pass, must cache the same
 * hash that the data object itself stores, and the data object must be
 * reachable through the data hash table.
 *
 * Returns 0 on success, -EBADMSG on inconsistency, or another negative
 * errno-style code on traversal failure. */
static int verify_entry(
                JournalFile *f,
                Object *o,
                uint64_t p,
                int data_fd,
                uint64_t n_data) {

        uint64_t n_items, j;
        int r;

        assert(f);
        assert(o);
        assert(data_fd >= 0);

        n_items = journal_file_entry_n_items(o);
        for (j = 0; j < n_items; j++) {
                uint64_t offset, hash;
                Object *u;

                offset = le64toh(o->entry.items[j].object_offset);
                hash = le64toh(o->entry.items[j].hash);

                /* The referenced offset must be one of the DATA objects
                 * collected during the first pass. */
                if (!contains_uint64(f->mmap, data_fd, n_data, offset)) {
                        log_error("Invalid data object at entry %llu", (unsigned long long) p);
                        return -EBADMSG;
                }

                r = journal_file_move_to_object(f, OBJECT_DATA, offset, &u);
                if (r < 0)
                        return r;

                /* The hash cached in the entry item must match the data
                 * object's own stored hash. */
                if (le64toh(u->data.hash) != hash) {
                        log_error("Hash mismatch for data object at entry %llu", (unsigned long long) p);
                        return -EBADMSG;
                }

                /* The data object must also be discoverable via the hash table. */
                r = data_object_in_hash_table(f, hash, offset);
                if (r < 0)
                        return r;
                if (r == 0) {
                        log_error("Data object missing from hash at entry %llu", (unsigned long long) p);
                        return -EBADMSG;
                }
        }

        return 0;
}
/* test-journal-flush: copies up to 10000 entries from the locally readable
 * journal into a freshly created journal file, exercising
 * journal_file_copy_entry(), then removes the scratch directory again. */
int main(int argc, char *argv[]) {
        _cleanup_free_ char *fn = NULL;
        char dn[] = "/var/tmp/test-journal-flush.XXXXXX";
        JournalFile *new_journal = NULL;
        sd_journal *j = NULL;
        unsigned n = 0;
        int r;

        /* Create a private scratch directory for the destination file. */
        assert_se(mkdtemp(dn));
        fn = strappend(dn, "/test.journal");

        r = journal_file_open(-1, fn, O_CREAT|O_RDWR, 0644, false, false, NULL, NULL, NULL, NULL, &new_journal);
        assert_se(r >= 0);

        /* Open the system journal(s) for reading. */
        r = sd_journal_open(&j, 0);
        assert_se(r >= 0);

        /* We only copy raw entry objects; no need to map any payload data. */
        sd_journal_set_data_threshold(j, 0);

        SD_JOURNAL_FOREACH(j) {
                Object *o;
                JournalFile *f;

                f = j->current_file;
                assert_se(f && f->current_offset > 0);

                r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
                assert_se(r >= 0);

                r = journal_file_copy_entry(f, new_journal, o, f->current_offset, NULL, NULL, NULL);
                assert_se(r >= 0);

                /* Cap the number of copied entries to keep the runtime bounded. */
                n++;
                if (n > 10000)
                        break;
        }

        sd_journal_close(j);

        (void) journal_file_close(new_journal);

        unlink(fn);
        assert_se(rmdir(dn) == 0);

        return 0;
}
/* Verifies the structural and (if sealed) cryptographic integrity of a
 * journal file in two passes.
 *
 * Pass 1 walks every object linearly: header/size/flag sanity, per-object
 * content checks, sequence-number/timestamp monotonicity of entries,
 * hash-table header consistency, and — when FSS sealing is in use — the
 * HMAC of every tag object.  The offsets of all DATA, ENTRY and ENTRY_ARRAY
 * objects are recorded into three temp files for pass 2.
 *
 * Pass 2 (verify_entry_array()/verify_hash_table()) follows all references
 * from the main entry array and the data hash table and cross-checks them
 * against the recorded offsets.
 *
 * On success returns 0 and fills in the optional output timestamps:
 * *first_contained/*last_contained = head/tail entry realtime,
 * *last_validated = realtime up to which sealing was verified.
 * On corruption returns a negative errno-style error (usually -EBADMSG). */
int journal_file_verify(
                JournalFile *f,
                const char *key,
                usec_t *first_contained, usec_t *last_validated, usec_t *last_contained,
                bool show_progress) {
        int r;
        Object *o;
        uint64_t p = 0, last_epoch = 0, last_tag_realtime = 0, last_sealed_realtime = 0;

        /* Running "previous entry" state used to enforce monotonicity. */
        uint64_t entry_seqnum = 0, entry_monotonic = 0, entry_realtime = 0;
        sd_id128_t entry_boot_id;
        bool entry_seqnum_set = false, entry_monotonic_set = false, entry_realtime_set = false, found_main_entry_array = false;

        /* Per-type object counters, compared against the header at the end. */
        uint64_t n_weird = 0, n_objects = 0, n_entries = 0, n_data = 0, n_fields = 0, n_data_hash_tables = 0, n_field_hash_tables = 0, n_entry_arrays = 0, n_tags = 0;
        usec_t last_usec = 0;

        /* Temp files collecting the offsets of data/entry/entry-array objects. */
        int data_fd = -1, entry_fd = -1, entry_array_fd = -1;
        unsigned i;
        bool found_last = false;
#ifdef HAVE_GCRYPT
        uint64_t last_tag = 0;
#endif

        assert(f);

        if (key) {
#ifdef HAVE_GCRYPT
                r = journal_file_parse_verification_key(f, key);
                if (r < 0) {
                        log_error("Failed to parse seed.");
                        return r;
                }
#else
                return -ENOTSUP;
#endif
        } else if (f->seal)
                /* A sealed file cannot be verified without the key. */
                return -ENOKEY;

        data_fd = open_tmpfile("/var/tmp", O_RDWR | O_CLOEXEC);
        if (data_fd < 0) {
                log_error_errno(errno, "Failed to create data file: %m");
                r = -errno;
                goto fail;
        }

        entry_fd = open_tmpfile("/var/tmp", O_RDWR | O_CLOEXEC);
        if (entry_fd < 0) {
                log_error_errno(errno, "Failed to create entry file: %m");
                r = -errno;
                goto fail;
        }

        entry_array_fd = open_tmpfile("/var/tmp", O_RDWR | O_CLOEXEC);
        if (entry_array_fd < 0) {
                log_error_errno(errno, "Failed to create entry array file: %m");
                r = -errno;
                goto fail;
        }

        /* Unknown compatible flags would mean semantics we cannot check. */
        if (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_SUPPORTED) {
                log_error("Cannot verify file with unknown extensions.");
                r = -ENOTSUP;
                goto fail;
        }

        for (i = 0; i < sizeof(f->header->reserved); i++)
                if (f->header->reserved[i] != 0) {
                        error(offsetof(Header, reserved[i]), "Reserved field is non-zero");
                        r = -EBADMSG;
                        goto fail;
                }

        /* First iteration: we go through all objects, verify the
         * superficial structure, headers, hashes. */

        p = le64toh(f->header->header_size);
        for (;;) {
                /* Early exit if there are no objects in the file, at all */
                if (le64toh(f->header->tail_object_offset) == 0)
                        break;

                if (show_progress)
                        draw_progress(scale_progress(0x7FFF, p, le64toh(f->header->tail_object_offset)), &last_usec);

                r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
                if (r < 0) {
                        error(p, "Invalid object");
                        goto fail;
                }

                if (p > le64toh(f->header->tail_object_offset)) {
                        error(offsetof(Header, tail_object_offset), "Invalid tail object pointer");
                        r = -EBADMSG;
                        goto fail;
                }

                n_objects ++;

                r = journal_file_object_verify(f, p, o);
                if (r < 0) {
                        error(p, "Invalid object contents: %s", strerror(-r));
                        goto fail;
                }

                /* At most one compression algorithm may be flagged at a time. */
                if ((o->object.flags & OBJECT_COMPRESSED_XZ) && (o->object.flags & OBJECT_COMPRESSED_LZ4)) {
                        error(p, "Objected with double compression");
                        r = -EINVAL;
                        goto fail;
                }

                if ((o->object.flags & OBJECT_COMPRESSED_XZ) && !JOURNAL_HEADER_COMPRESSED_XZ(f->header)) {
                        error(p, "XZ compressed object in file without XZ compression");
                        r = -EBADMSG;
                        goto fail;
                }

                if ((o->object.flags & OBJECT_COMPRESSED_LZ4) && !JOURNAL_HEADER_COMPRESSED_LZ4(f->header)) {
                        error(p, "LZ4 compressed object in file without LZ4 compression");
                        r = -EBADMSG;
                        goto fail;
                }

                switch (o->object.type) {

                case OBJECT_DATA:
                        /* Remember this offset for the second pass. */
                        r = write_uint64(data_fd, p);
                        if (r < 0)
                                goto fail;

                        n_data++;
                        break;

                case OBJECT_FIELD:
                        n_fields++;
                        break;

                case OBJECT_ENTRY:
                        /* In a sealed file the very first object must be a tag. */
                        if (JOURNAL_HEADER_SEALED(f->header) && n_tags <= 0) {
                                error(p, "First entry before first tag");
                                r = -EBADMSG;
                                goto fail;
                        }

                        r = write_uint64(entry_fd, p);
                        if (r < 0)
                                goto fail;

                        if (le64toh(o->entry.realtime) < last_tag_realtime) {
                                error(p, "Older entry after newer tag");
                                r = -EBADMSG;
                                goto fail;
                        }

                        if (!entry_seqnum_set && le64toh(o->entry.seqnum) != le64toh(f->header->head_entry_seqnum)) {
                                error(p, "Head entry sequence number incorrect");
                                r = -EBADMSG;
                                goto fail;
                        }

                        /* Sequence numbers must be strictly increasing. */
                        if (entry_seqnum_set && entry_seqnum >= le64toh(o->entry.seqnum)) {
                                error(p, "Entry sequence number out of synchronization");
                                r = -EBADMSG;
                                goto fail;
                        }

                        entry_seqnum = le64toh(o->entry.seqnum);
                        entry_seqnum_set = true;

                        /* Monotonic timestamps only need to be ordered within one boot. */
                        if (entry_monotonic_set && sd_id128_equal(entry_boot_id, o->entry.boot_id) && entry_monotonic > le64toh(o->entry.monotonic)) {
                                error(p, "Entry timestamp out of synchronization");
                                r = -EBADMSG;
                                goto fail;
                        }

                        entry_monotonic = le64toh(o->entry.monotonic);
                        entry_boot_id = o->entry.boot_id;
                        entry_monotonic_set = true;

                        if (!entry_realtime_set && le64toh(o->entry.realtime) != le64toh(f->header->head_entry_realtime)) {
                                error(p, "Head entry realtime timestamp incorrect");
                                r = -EBADMSG;
                                goto fail;
                        }

                        entry_realtime = le64toh(o->entry.realtime);
                        entry_realtime_set = true;

                        n_entries ++;
                        break;

                case OBJECT_DATA_HASH_TABLE:
                        if (n_data_hash_tables > 1) {
                                error(p, "More than one data hash table");
                                r = -EBADMSG;
                                goto fail;
                        }

                        /* The header's table offset/size must describe exactly
                         * this object's item area. */
                        if (le64toh(f->header->data_hash_table_offset) != p + offsetof(HashTableObject, items) || le64toh(f->header->data_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
                                error(p, "header fields for data hash table invalid");
                                r = -EBADMSG;
                                goto fail;
                        }

                        n_data_hash_tables++;
                        break;

                case OBJECT_FIELD_HASH_TABLE:
                        if (n_field_hash_tables > 1) {
                                error(p, "More than one field hash table");
                                r = -EBADMSG;
                                goto fail;
                        }

                        if (le64toh(f->header->field_hash_table_offset) != p + offsetof(HashTableObject, items) || le64toh(f->header->field_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
                                error(p, "Header fields for field hash table invalid");
                                r = -EBADMSG;
                                goto fail;
                        }

                        n_field_hash_tables++;
                        break;

                case OBJECT_ENTRY_ARRAY:
                        r = write_uint64(entry_array_fd, p);
                        if (r < 0)
                                goto fail;

                        if (p == le64toh(f->header->entry_array_offset)) {
                                if (found_main_entry_array) {
                                        error(p, "More than one main entry array");
                                        r = -EBADMSG;
                                        goto fail;
                                }

                                found_main_entry_array = true;
                        }

                        n_entry_arrays++;
                        break;

                case OBJECT_TAG:
                        if (!JOURNAL_HEADER_SEALED(f->header)) {
                                error(p, "Tag object in file without sealing");
                                r = -EBADMSG;
                                goto fail;
                        }

                        /* Tags are numbered consecutively starting at 1. */
                        if (le64toh(o->tag.seqnum) != n_tags + 1) {
                                error(p, "Tag sequence number out of synchronization");
                                r = -EBADMSG;
                                goto fail;
                        }

                        if (le64toh(o->tag.epoch) < last_epoch) {
                                error(p, "Epoch sequence out of synchronization");
                                r = -EBADMSG;
                                goto fail;
                        }

#ifdef HAVE_GCRYPT
                        if (f->seal) {
                                uint64_t q, rt;

                                debug(p, "Checking tag %"PRIu64"...", le64toh(o->tag.seqnum));

                                /* NOTE(review): o->tag.epoch is used raw here but
                                 * le64toh()'d everywhere else — looks like a missing
                                 * byte-swap on big-endian hosts; confirm. */
                                rt = f->fss_start_usec + o->tag.epoch * f->fss_interval_usec;
                                if (entry_realtime_set && entry_realtime >= rt + f->fss_interval_usec) {
                                        error(p, "tag/entry realtime timestamp out of synchronization");
                                        r = -EBADMSG;
                                        goto fail;
                                }

                                /* OK, now we know the epoch. So let's now set
                                 * it, and calculate the HMAC for everything
                                 * since the last tag. */
                                r = journal_file_fsprg_seek(f, le64toh(o->tag.epoch));
                                if (r < 0)
                                        goto fail;

                                r = journal_file_hmac_start(f);
                                if (r < 0)
                                        goto fail;

                                if (last_tag == 0) {
                                        /* First tag covers everything from the header on. */
                                        r = journal_file_hmac_put_header(f);
                                        if (r < 0)
                                                goto fail;

                                        q = le64toh(f->header->header_size);
                                } else
                                        q = last_tag;

                                /* Feed every object from the previous tag up to and
                                 * including this tag object into the HMAC. */
                                while (q <= p) {
                                        r = journal_file_move_to_object(f, OBJECT_UNUSED, q, &o);
                                        if (r < 0)
                                                goto fail;

                                        r = journal_file_hmac_put_object(f, OBJECT_UNUSED, o, q);
                                        if (r < 0)
                                                goto fail;

                                        q = q + ALIGN64(le64toh(o->object.size));
                                }

                                /* Position might have changed, let's reposition things */
                                r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
                                if (r < 0)
                                        goto fail;

                                if (memcmp(o->tag.tag, gcry_md_read(f->hmac, 0), TAG_LENGTH) != 0) {
                                        error(p, "Tag failed verification");
                                        r = -EBADMSG;
                                        goto fail;
                                }

                                f->hmac_running = false;
                                last_tag_realtime = rt;
                                last_sealed_realtime = entry_realtime;
                        }

                        last_tag = p + ALIGN64(le64toh(o->object.size));
#endif

                        last_epoch = le64toh(o->tag.epoch);

                        n_tags ++;
                        break;

                default:
                        /* Unknown object types are tolerated but counted. */
                        n_weird ++;
                }

                if (p == le64toh(f->header->tail_object_offset)) {
                        found_last = true;
                        break;
                }

                p = p + ALIGN64(le64toh(o->object.size));
        };

        if (!found_last && le64toh(f->header->tail_object_offset) != 0) {
                error(le64toh(f->header->tail_object_offset), "Tail object pointer dead");
                r = -EBADMSG;
                goto fail;
        }

        /* Cross-check the per-type counters against the header's bookkeeping. */
        if (n_objects != le64toh(f->header->n_objects)) {
                error(offsetof(Header, n_objects), "Object number mismatch");
                r = -EBADMSG;
                goto fail;
        }

        if (n_entries != le64toh(f->header->n_entries)) {
                error(offsetof(Header, n_entries), "Entry number mismatch");
                r = -EBADMSG;
                goto fail;
        }

        if (JOURNAL_HEADER_CONTAINS(f->header, n_data) && n_data != le64toh(f->header->n_data)) {
                error(offsetof(Header, n_data), "Data number mismatch");
                r = -EBADMSG;
                goto fail;
        }

        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields) && n_fields != le64toh(f->header->n_fields)) {
                error(offsetof(Header, n_fields), "Field number mismatch");
                r = -EBADMSG;
                goto fail;
        }

        if (JOURNAL_HEADER_CONTAINS(f->header, n_tags) && n_tags != le64toh(f->header->n_tags)) {
                error(offsetof(Header, n_tags), "Tag number mismatch");
                r = -EBADMSG;
                goto fail;
        }

        if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays) && n_entry_arrays != le64toh(f->header->n_entry_arrays)) {
                error(offsetof(Header, n_entry_arrays), "Entry array number mismatch");
                r = -EBADMSG;
                goto fail;
        }

        if (!found_main_entry_array && le64toh(f->header->entry_array_offset) != 0) {
                error(0, "Missing entry array");
                r = -EBADMSG;
                goto fail;
        }

        if (entry_seqnum_set && entry_seqnum != le64toh(f->header->tail_entry_seqnum)) {
                error(offsetof(Header, tail_entry_seqnum), "Invalid tail seqnum");
                r = -EBADMSG;
                goto fail;
        }

        if (entry_monotonic_set && (!sd_id128_equal(entry_boot_id, f->header->boot_id) || entry_monotonic != le64toh(f->header->tail_entry_monotonic))) {
                error(0, "Invalid tail monotonic timestamp");
                r = -EBADMSG;
                goto fail;
        }

        if (entry_realtime_set && entry_realtime != le64toh(f->header->tail_entry_realtime)) {
                error(0, "Invalid tail realtime timestamp");
                r = -EBADMSG;
                goto fail;
        }

        /* Second iteration: we follow all objects referenced from the
         * two entry points: the object hash table and the entry
         * array. We also check that everything referenced (directly
         * or indirectly) in the data hash table also exists in the
         * entry array, and vice versa. Note that we do not care for
         * unreferenced objects. We only care that everything that is
         * referenced is consistent. */

        r = verify_entry_array(f, data_fd, n_data, entry_fd, n_entries, entry_array_fd, n_entry_arrays, &last_usec, show_progress);
        if (r < 0)
                goto fail;

        r = verify_hash_table(f, data_fd, n_data, entry_fd, n_entries, entry_array_fd, n_entry_arrays, &last_usec, show_progress);
        if (r < 0)
                goto fail;

        if (show_progress)
                flush_progress();

        mmap_cache_close_fd(f->mmap, data_fd);
        mmap_cache_close_fd(f->mmap, entry_fd);
        mmap_cache_close_fd(f->mmap, entry_array_fd);

        safe_close(data_fd);
        safe_close(entry_fd);
        safe_close(entry_array_fd);

        if (first_contained)
                *first_contained = le64toh(f->header->head_entry_realtime);
        if (last_validated)
                *last_validated = last_sealed_realtime;
        if (last_contained)
                *last_contained = le64toh(f->header->tail_entry_realtime);

        return 0;

fail:
        if (show_progress)
                flush_progress();

        /* NOTE(review): 100 * p / f->last_stat.st_size divides by zero if the
         * file is empty — presumably unreachable here since we mapped objects,
         * but worth confirming. */
        log_error("File corruption detected at %s:"OFSfmt" (of %llu bytes, %"PRIu64"%%).",
                  f->path,
                  p,
                  (unsigned long long) f->last_stat.st_size,
                  100 * p / f->last_stat.st_size);

        if (data_fd >= 0) {
                mmap_cache_close_fd(f->mmap, data_fd);
                safe_close(data_fd);
        }

        if (entry_fd >= 0) {
                mmap_cache_close_fd(f->mmap, entry_fd);
                safe_close(entry_fd);
        }

        if (entry_array_fd >= 0) {
                mmap_cache_close_fd(f->mmap, entry_array_fd);
                safe_close(entry_array_fd);
        }

        return r;
}
static int verify_entry_array( JournalFile *f, int data_fd, uint64_t n_data, int entry_fd, uint64_t n_entries, int entry_array_fd, uint64_t n_entry_arrays, usec_t *last_usec, bool show_progress) { uint64_t i = 0, a, n, last = 0; int r; assert(f); assert(data_fd >= 0); assert(entry_fd >= 0); assert(entry_array_fd >= 0); assert(last_usec); n = le64toh(f->header->n_entries); a = le64toh(f->header->entry_array_offset); while (i < n) { uint64_t next, m, j; Object *o; if (show_progress) draw_progress(0x8000 + scale_progress(0x3FFF, i, n), last_usec); if (a == 0) { error(a, "Array chain too short at %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } if (!contains_uint64(f->mmap, entry_array_fd, n_entry_arrays, a)) { error(a, "Invalid array %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o); if (r < 0) return r; next = le64toh(o->entry_array.next_entry_array_offset); if (next != 0 && next <= a) { error(a, "Array chain has cycle at %"PRIu64" of %"PRIu64" (jumps back from to "OFSfmt")", i, n, next); return -EBADMSG; } m = journal_file_entry_array_n_items(o); for (j = 0; i < n && j < m; i++, j++) { uint64_t p; p = le64toh(o->entry_array.items[j]); if (p <= last) { error(a, "Entry array not sorted at %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } last = p; if (!contains_uint64(f->mmap, entry_fd, n_entries, p)) { error(a, "Invalid array entry at %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o); if (r < 0) return r; r = verify_entry(f, o, p, data_fd, n_data); if (r < 0) return r; /* Pointer might have moved, reposition */ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o); if (r < 0) return r; } a = next; } return 0; }
static int verify_hash_table( JournalFile *f, int data_fd, uint64_t n_data, int entry_fd, uint64_t n_entries, int entry_array_fd, uint64_t n_entry_arrays, usec_t *last_usec, bool show_progress) { uint64_t i, n; int r; assert(f); assert(data_fd >= 0); assert(entry_fd >= 0); assert(entry_array_fd >= 0); assert(last_usec); n = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); if (n <= 0) return 0; r = journal_file_map_data_hash_table(f); if (r < 0) return log_error_errno(r, "Failed to map data hash table: %m"); for (i = 0; i < n; i++) { uint64_t last = 0, p; if (show_progress) draw_progress(0xC000 + scale_progress(0x3FFF, i, n), last_usec); p = le64toh(f->data_hash_table[i].head_hash_offset); while (p != 0) { Object *o; uint64_t next; if (!contains_uint64(f->mmap, data_fd, n_data, p)) { error(p, "Invalid data object at hash entry %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } r = journal_file_move_to_object(f, OBJECT_DATA, p, &o); if (r < 0) return r; next = le64toh(o->data.next_hash_offset); if (next != 0 && next <= p) { error(p, "Hash chain has a cycle in hash entry %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } if (le64toh(o->data.hash) % n != i) { error(p, "Hash value mismatch in hash entry %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } r = verify_data(f, o, p, entry_fd, n_entries, entry_array_fd, n_entry_arrays); if (r < 0) return r; last = p; p = next; } if (last != le64toh(f->data_hash_table[i].tail_hash_offset)) { error(p, "Tail hash pointer mismatch in hash table"); return -EBADMSG; } } return 0; }
/* Verifies the entry references of a single DATA object at offset 'p': the
 * object's n_entries references (the inline entry_offset plus the chained
 * entry arrays) must be strictly ascending, lie within the set of entries
 * recorded during the first pass, and each referenced entry must point back
 * at this data object (checked via entry_points_to_data()). */
static int verify_data(
                JournalFile *f,
                Object *o, uint64_t p,
                int entry_fd, uint64_t n_entries,
                int entry_array_fd, uint64_t n_entry_arrays) {

        uint64_t i, n, a, last, q;
        int r;

        assert(f);
        assert(o);
        assert(entry_fd >= 0);
        assert(entry_array_fd >= 0);

        n = le64toh(o->data.n_entries);
        a = le64toh(o->data.entry_array_offset);

        /* Entry array means at least two objects */
        if (a && n < 2) {
                error(p, "Entry array present (entry_array_offset="OFSfmt", but n_entries=%"PRIu64")", a, n);
                return -EBADMSG;
        }

        if (n == 0)
                return 0;

        /* We already checked that earlier */
        assert(o->data.entry_offset);

        /* The first entry reference is stored inline in the data object. */
        last = q = le64toh(o->data.entry_offset);
        r = entry_points_to_data(f, entry_fd, n_entries, q, p);
        if (r < 0)
                return r;

        /* The remaining n-1 references come from the chained entry arrays. */
        i = 1;
        while (i < n) {
                uint64_t next, m, j;

                if (a == 0) {
                        error(p, "Array chain too short");
                        return -EBADMSG;
                }

                if (!contains_uint64(f->mmap, entry_array_fd, n_entry_arrays, a)) {
                        error(p, "Invalid array offset "OFSfmt, a);
                        return -EBADMSG;
                }

                /* Note: this re-maps 'o'; from here on 'o' is the current
                 * entry array object, not the data object anymore. */
                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                if (r < 0)
                        return r;

                /* Arrays are appended to the file, so the chain must move forward. */
                next = le64toh(o->entry_array.next_entry_array_offset);
                if (next != 0 && next <= a) {
                        error(p, "Array chain has cycle (jumps back from "OFSfmt" to "OFSfmt")", a, next);
                        return -EBADMSG;
                }

                m = journal_file_entry_array_n_items(o);
                for (j = 0; i < n && j < m; i++, j++) {

                        /* References must be strictly sorted by offset. */
                        q = le64toh(o->entry_array.items[j]);
                        if (q <= last) {
                                error(p, "Data object's entry array not sorted");
                                return -EBADMSG;
                        }
                        last = q;

                        r = entry_points_to_data(f, entry_fd, n_entries, q, p);
                        if (r < 0)
                                return r;

                        /* Pointer might have moved, reposition */
                        r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                        if (r < 0)
                                return r;
                }

                a = next;
        }

        return 0;
}
/* Checks that the ENTRY object at offset 'entry_p' (a) was recorded during
 * the first pass, (b) has an item referencing the DATA object at 'data_p',
 * and (c) is itself reachable through the main entry array (which has
 * already been verified, so we can binary-search each array segment).
 *
 * Fix: if a chained entry array reports zero items, the old code read
 * items[u-1] out of bounds (u == 0) and never advanced 'i', looping forever
 * on corrupt input; such arrays are now rejected with -EBADMSG. */
static int entry_points_to_data(
                JournalFile *f,
                int entry_fd,
                uint64_t n_entries,
                uint64_t entry_p,
                uint64_t data_p) {

        int r;
        uint64_t i, n, a;
        Object *o;
        bool found = false;

        assert(f);
        assert(entry_fd >= 0);

        if (!contains_uint64(f->mmap, entry_fd, n_entries, entry_p)) {
                error(data_p, "Data object references invalid entry at "OFSfmt, entry_p);
                return -EBADMSG;
        }

        r = journal_file_move_to_object(f, OBJECT_ENTRY, entry_p, &o);
        if (r < 0)
                return r;

        /* The entry must carry an item pointing back at our data object. */
        n = journal_file_entry_n_items(o);
        for (i = 0; i < n; i++)
                if (le64toh(o->entry.items[i].object_offset) == data_p) {
                        found = true;
                        break;
                }

        if (!found) {
                error(entry_p, "Data object at "OFSfmt" not referenced by linked entry", data_p);
                return -EBADMSG;
        }

        /* Check if this entry is also in main entry array. Since the
         * main entry array has already been verified we can rely on
         * its consistency. */

        i = 0;
        n = le64toh(f->header->n_entries);
        a = le64toh(f->header->entry_array_offset);

        while (i < n) {
                uint64_t m, u;

                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                if (r < 0)
                        return r;

                m = journal_file_entry_array_n_items(o);
                u = MIN(n - i, m);

                /* Defensive: an array segment with zero items cannot contain
                 * the entry, and without this check items[u-1] below would be
                 * an out-of-bounds read and 'i' would never advance. */
                if (u == 0) {
                        error(a, "Entry array with zero items");
                        return -EBADMSG;
                }

                /* Entries are sorted by offset, hence the entry can only be
                 * in this segment if it does not exceed the segment's last item. */
                if (entry_p <= le64toh(o->entry_array.items[u-1])) {
                        uint64_t x, y, z;

                        /* Binary search within [0, u). */
                        x = 0;
                        y = u;

                        while (x < y) {
                                z = (x + y) / 2;

                                if (le64toh(o->entry_array.items[z]) == entry_p)
                                        return 0;

                                if (x + 1 >= y)
                                        break;

                                if (entry_p < le64toh(o->entry_array.items[z]))
                                        y = z;
                                else
                                        x = z;
                        }

                        error(entry_p, "Entry object doesn't exist in main entry array");
                        return -EBADMSG;
                }

                i += u;
                a = le64toh(o->entry_array.next_entry_array_offset);
        }

        return 0;
}
/* Test helper: appends one entry of 'data_size' zero bytes to a freshly
 * created journal file (opened with the given compression threshold), then
 * walks the raw object stream to locate the first DATA object and returns
 * whether it was stored compressed on disk. */
static bool check_compressed(uint64_t compress_threshold, uint64_t data_size) {
        dual_timestamp ts;
        JournalFile *f;
        struct iovec iovec;
        Object *o;
        uint64_t p;
        char t[] = "/tmp/journal-XXXXXX";
        char data[2048] = {0};
        bool is_compressed;
        int r;

        assert_se(data_size <= sizeof(data));

        test_setup_logging(LOG_DEBUG);

        /* Work inside a private scratch directory. */
        assert_se(mkdtemp(t));
        assert_se(chdir(t) >= 0);

        assert_se(journal_file_open(-1, "test.journal", O_RDWR|O_CREAT, 0666, true, compress_threshold, true, NULL, NULL, NULL, NULL, &f) == 0);

        dual_timestamp_get(&ts);

        /* Append a single all-zeroes entry of the requested size. */
        iovec.iov_base = (void*) data;
        iovec.iov_len = data_size;
        assert_se(journal_file_append_entry(f, &ts, NULL, &iovec, 1, NULL, NULL, NULL) == 0);

#if HAVE_GCRYPT
        /* NOTE(review): return value ignored — presumably best-effort here; confirm. */
        journal_file_append_tag(f);
#endif
        journal_file_dump(f);

        /* We have to partially reimplement some of the dump logic, because the normal next_entry does the
         * decompression for us. */
        p = le64toh(f->header->header_size);
        for (;;) {
                r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
                assert_se(r == 0);
                if (o->object.type == OBJECT_DATA)
                        break;

                /* There must be a DATA object before we run past the tail. */
                assert_se(p < le64toh(f->header->tail_object_offset));
                p = p + ALIGN64(le64toh(o->object.size));
        }

        is_compressed = (o->object.flags & OBJECT_COMPRESSION_MASK) != 0;

        (void) journal_file_close(f);

        log_info("Done...");

        if (arg_keep)
                log_info("Not removing %s", t);
        else {
                journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);

                assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
        }

        puts("------------------------------------------------------------");

        return is_compressed;
}
/* Older variant of verify_data() using log_error()-style diagnostics and
 * lacking the explicit "entry array implies n_entries >= 2" check; otherwise
 * the same contract: validates that the DATA object's entry references are
 * strictly sorted and all point back at this object.
 * NOTE(review): this duplicates the error()-based verify_data() earlier in
 * this file — presumably two snapshots of the same function were pasted
 * together; confirm which one the build actually uses. */
static int verify_data(
                JournalFile *f,
                Object *o, uint64_t p,
                int entry_fd, uint64_t n_entries,
                int entry_array_fd, uint64_t n_entry_arrays) {

        uint64_t i, n, a, last, q;
        int r;

        assert(f);
        assert(o);
        assert(entry_fd >= 0);
        assert(entry_array_fd >= 0);

        n = le64toh(o->data.n_entries);
        a = le64toh(o->data.entry_array_offset);

        /* We already checked this earlier */
        assert(n > 0);

        /* The first entry reference is stored inline in the data object. */
        last = q = le64toh(o->data.entry_offset);
        r = entry_points_to_data(f, entry_fd, n_entries, q, p);
        if (r < 0)
                return r;

        /* The remaining n-1 references come from the chained entry arrays.
         * Note: the loop below re-maps 'o' to the current entry array object. */
        i = 1;
        while (i < n) {
                uint64_t next, m, j;

                if (a == 0) {
                        log_error("Array chain too short at %llu", (unsigned long long) p);
                        return -EBADMSG;
                }

                if (!contains_uint64(f->mmap, entry_array_fd, n_entry_arrays, a)) {
                        log_error("Invalid array at %llu", (unsigned long long) p);
                        return -EBADMSG;
                }

                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                if (r < 0)
                        return r;

                /* Arrays are appended to the file, so the chain must move forward. */
                next = le64toh(o->entry_array.next_entry_array_offset);
                if (next != 0 && next <= a) {
                        log_error("Array chain has cycle at %llu", (unsigned long long) p);
                        return -EBADMSG;
                }

                m = journal_file_entry_array_n_items(o);
                for (j = 0; i < n && j < m; i++, j++) {

                        /* References must be strictly sorted by offset. */
                        q = le64toh(o->entry_array.items[j]);
                        if (q <= last) {
                                log_error("Data object's entry array not sorted at %llu", (unsigned long long) p);
                                return -EBADMSG;
                        }
                        last = q;

                        r = entry_points_to_data(f, entry_fd, n_entries, q, p);
                        if (r < 0)
                                return r;

                        /* Pointer might have moved, reposition */
                        r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                        if (r < 0)
                                return r;
                }

                a = next;
        }

        return 0;
}