/* Verify that the entry object at offset 'entry_p' really references the data
 * object at offset 'data_p', and that the entry itself is reachable from the
 * (already verified) main entry array. Used while cross-checking the backward
 * links of a data object.
 *
 * f         - journal file being verified
 * entry_fd  - fd of the scratch file listing all valid entry offsets
 * n_entries - number of offsets recorded in that scratch file
 * entry_p   - offset of the entry object to check
 * data_p    - offset of the data object that claims to be referenced
 *
 * Returns 0 on success, -EBADMSG on an inconsistency, or a negative errno
 * from object access. */
static int entry_points_to_data(
                JournalFile *f,
                int entry_fd,
                uint64_t n_entries,
                uint64_t entry_p,
                uint64_t data_p) {

        int r;
        uint64_t i, n, a;
        Object *o;
        bool found = false;

        assert(f);
        assert(entry_fd >= 0);

        /* First, the referenced entry offset must be one of the entry offsets
         * we recorded in the scratch file during the main pass. */
        if (!contains_uint64(f->mmap, entry_fd, n_entries, entry_p)) {
                error(data_p, "Data object references invalid entry at "OFSfmt, entry_p);
                return -EBADMSG;
        }

        r = journal_file_move_to_object(f, OBJECT_ENTRY, entry_p, &o);
        if (r < 0)
                return r;

        /* The entry must list data_p among its items (forward link must match
         * the backward link we are following). */
        n = journal_file_entry_n_items(o);
        for (i = 0; i < n; i++)
                if (le64toh(o->entry.items[i].object_offset) == data_p) {
                        found = true;
                        break;
                }

        if (!found) {
                error(entry_p, "Data object at "OFSfmt" not referenced by linked entry", data_p);
                return -EBADMSG;
        }

        /* Check if this entry is also in main entry array. Since the
         * main entry array has already been verified we can rely on
         * its consistency. */

        i = 0;
        n = le64toh(f->header->n_entries);
        a = le64toh(f->header->entry_array_offset);

        /* Walk the chain of entry-array objects; offsets within it are sorted
         * ascending, so each chunk can be binary-searched. */
        while (i < n) {
                uint64_t m, u;

                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                if (r < 0)
                        return r;

                m = journal_file_entry_array_n_items(o);
                /* Only the first (n - i) slots of this chunk are in use. */
                u = MIN(n - i, m);

                if (entry_p <= le64toh(o->entry_array.items[u-1])) {
                        uint64_t x, y, z;

                        /* entry_p sorts into this chunk: binary search it.
                         * Invariant: candidates live in [x, y). */
                        x = 0;
                        y = u;

                        while (x < y) {
                                z = (x + y) / 2;

                                if (le64toh(o->entry_array.items[z]) == entry_p)
                                        return 0;

                                /* Interval can no longer shrink: not present. */
                                if (x + 1 >= y)
                                        break;

                                if (entry_p < le64toh(o->entry_array.items[z]))
                                        y = z;
                                else
                                        x = z;
                        }

                        error(entry_p, "Entry object doesn't exist in main entry array");
                        return -EBADMSG;
                }

                i += u;
                a = le64toh(o->entry_array.next_entry_array_offset);
        }

        return 0;
}
/* Superficially verify a single journal object at 'offset': size plausibility,
 * field-value ranges, payload hash. It does not follow any references to other
 * objects.
 *
 * Fix: the on-disk fields are little-endian (le64_t). VALID64()/VALID_EPOCH()
 * and the log format arguments were previously given the raw LE words, which
 * mis-validates and mis-prints on big-endian hosts; all such uses now go
 * through le64toh() first.
 *
 * Returns 0 if the object looks sane, -EBADMSG on a consistency error,
 * -EPROTONOSUPPORT if a compressed object is found without XZ support. */
static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o) {
        uint64_t i;

        assert(f);
        assert(offset);
        assert(o);

        /* This does various superficial tests about the length an
         * possible field values. It does not follow any references to
         * other objects. */

        /* Only DATA objects may carry the compression flag. */
        if ((o->object.flags & OBJECT_COMPRESSED) &&
            o->object.type != OBJECT_DATA)
                return -EBADMSG;

        switch (o->object.type) {

        case OBJECT_DATA: {
                uint64_t h1, h2;

                if (le64toh(o->data.entry_offset) == 0)
                        log_warning(OFSfmt": unused data (entry_offset==0)", offset);

                /* entry_offset and n_entries must be zero/non-zero together. */
                if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0)) {
                        log_error(OFSfmt": bad n_entries: %"PRIu64, offset, le64toh(o->data.n_entries));
                        return -EBADMSG;
                }

                /* Unsigned arithmetic: "<= 0" here means "== 0", i.e. empty payload. */
                if (le64toh(o->object.size) - offsetof(DataObject, payload) <= 0) {
                        log_error(OFSfmt": bad object size (<= %zu): %"PRIu64,
                                  offset,
                                  offsetof(DataObject, payload),
                                  le64toh(o->object.size));
                        return -EBADMSG;
                }

                h1 = le64toh(o->data.hash);

                if (o->object.flags & OBJECT_COMPRESSED) {
#ifdef HAVE_XZ
                        void *b = NULL;
                        uint64_t alloc = 0, b_size;

                        if (!uncompress_blob(o->data.payload,
                                             le64toh(o->object.size) - offsetof(Object, data.payload),
                                             &b, &alloc, &b_size, 0)) {
                                log_error(OFSfmt": uncompression failed", offset);
                                return -EBADMSG;
                        }

                        /* Hash is computed over the uncompressed payload. */
                        h2 = hash64(b, b_size);
                        free(b);
#else
                        log_error("Compression is not supported");
                        return -EPROTONOSUPPORT;
#endif
                } else
                        h2 = hash64(o->data.payload, le64toh(o->object.size) - offsetof(Object, data.payload));

                if (h1 != h2) {
                        log_error(OFSfmt": invalid hash (%08"PRIx64" vs. %08"PRIx64, offset, h1, h2);
                        return -EBADMSG;
                }

                if (!VALID64(le64toh(o->data.next_hash_offset)) ||
                    !VALID64(le64toh(o->data.next_field_offset)) ||
                    !VALID64(le64toh(o->data.entry_offset)) ||
                    !VALID64(le64toh(o->data.entry_array_offset))) {
                        log_error(OFSfmt": invalid offset (next_hash_offset="OFSfmt", next_field_offset="OFSfmt", entry_offset="OFSfmt", entry_array_offset="OFSfmt,
                                  offset,
                                  le64toh(o->data.next_hash_offset),
                                  le64toh(o->data.next_field_offset),
                                  le64toh(o->data.entry_offset),
                                  le64toh(o->data.entry_array_offset));
                        return -EBADMSG;
                }

                break;
        }

        case OBJECT_FIELD:
                if (le64toh(o->object.size) - offsetof(FieldObject, payload) <= 0) {
                        log_error(OFSfmt": bad field size (<= %zu): %"PRIu64,
                                  offset,
                                  offsetof(FieldObject, payload),
                                  le64toh(o->object.size));
                        return -EBADMSG;
                }

                if (!VALID64(le64toh(o->field.next_hash_offset)) ||
                    !VALID64(le64toh(o->field.head_data_offset))) {
                        log_error(OFSfmt": invalid offset (next_hash_offset="OFSfmt", head_data_offset="OFSfmt,
                                  offset,
                                  le64toh(o->field.next_hash_offset),
                                  le64toh(o->field.head_data_offset));
                        return -EBADMSG;
                }

                break;

        case OBJECT_ENTRY:
                /* Trailing area must be a whole, non-empty array of EntryItem. */
                if ((le64toh(o->object.size) - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0) {
                        log_error(OFSfmt": bad entry size (<= %zu): %"PRIu64,
                                  offset,
                                  offsetof(EntryObject, items),
                                  le64toh(o->object.size));
                        return -EBADMSG;
                }

                if ((le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0) {
                        log_error(OFSfmt": invalid number items in entry: %"PRIu64,
                                  offset,
                                  (le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem));
                        return -EBADMSG;
                }

                if (le64toh(o->entry.seqnum) <= 0) {
                        log_error(OFSfmt": invalid entry seqnum: %"PRIx64,
                                  offset,
                                  le64toh(o->entry.seqnum));
                        return -EBADMSG;
                }

                if (!VALID_REALTIME(le64toh(o->entry.realtime))) {
                        log_error(OFSfmt": invalid entry realtime timestamp: %"PRIu64,
                                  offset,
                                  le64toh(o->entry.realtime));
                        return -EBADMSG;
                }

                if (!VALID_MONOTONIC(le64toh(o->entry.monotonic))) {
                        log_error(OFSfmt": invalid entry monotonic timestamp: %"PRIu64,
                                  offset,
                                  le64toh(o->entry.monotonic));
                        return -EBADMSG;
                }

                for (i = 0; i < journal_file_entry_n_items(o); i++) {
                        if (o->entry.items[i].object_offset == 0 ||
                            !VALID64(le64toh(o->entry.items[i].object_offset))) {
                                log_error(OFSfmt": invalid entry item (%"PRIu64"/%"PRIu64" offset: "OFSfmt,
                                          offset,
                                          i,
                                          journal_file_entry_n_items(o),
                                          le64toh(o->entry.items[i].object_offset));
                                return -EBADMSG;
                        }
                }

                break;

        case OBJECT_DATA_HASH_TABLE:
        case OBJECT_FIELD_HASH_TABLE:
                if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 ||
                    (le64toh(o->object.size) - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0) {
                        log_error(OFSfmt": invalid %s hash table size: %"PRIu64,
                                  offset,
                                  o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
                                  le64toh(o->object.size));
                        return -EBADMSG;
                }

                for (i = 0; i < journal_file_hash_table_n_items(o); i++) {
                        if (o->hash_table.items[i].head_hash_offset != 0 &&
                            !VALID64(le64toh(o->hash_table.items[i].head_hash_offset))) {
                                log_error(OFSfmt": invalid %s hash table item (%"PRIu64"/%"PRIu64") head_hash_offset: "OFSfmt,
                                          offset,
                                          o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
                                          i,
                                          journal_file_hash_table_n_items(o),
                                          le64toh(o->hash_table.items[i].head_hash_offset));
                                return -EBADMSG;
                        }
                        if (o->hash_table.items[i].tail_hash_offset != 0 &&
                            !VALID64(le64toh(o->hash_table.items[i].tail_hash_offset))) {
                                log_error(OFSfmt": invalid %s hash table item (%"PRIu64"/%"PRIu64") tail_hash_offset: "OFSfmt,
                                          offset,
                                          o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
                                          i,
                                          journal_file_hash_table_n_items(o),
                                          le64toh(o->hash_table.items[i].tail_hash_offset));
                                return -EBADMSG;
                        }

                        /* A bucket must have both head and tail set, or neither. */
                        if ((o->hash_table.items[i].head_hash_offset != 0) !=
                            (o->hash_table.items[i].tail_hash_offset != 0)) {
                                log_error(OFSfmt": invalid %s hash table item (%"PRIu64"/%"PRIu64"): head_hash_offset="OFSfmt" tail_hash_offset="OFSfmt,
                                          offset,
                                          o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
                                          i,
                                          journal_file_hash_table_n_items(o),
                                          le64toh(o->hash_table.items[i].head_hash_offset),
                                          le64toh(o->hash_table.items[i].tail_hash_offset));
                                return -EBADMSG;
                        }
                }

                break;

        case OBJECT_ENTRY_ARRAY:
                if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 ||
                    (le64toh(o->object.size) - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0) {
                        log_error(OFSfmt": invalid object entry array size: %"PRIu64,
                                  offset,
                                  le64toh(o->object.size));
                        return -EBADMSG;
                }

                if (!VALID64(le64toh(o->entry_array.next_entry_array_offset))) {
                        log_error(OFSfmt": invalid object entry array next_entry_array_offset: "OFSfmt,
                                  offset,
                                  le64toh(o->entry_array.next_entry_array_offset));
                        return -EBADMSG;
                }

                for (i = 0; i < journal_file_entry_array_n_items(o); i++)
                        if (le64toh(o->entry_array.items[i]) != 0 &&
                            !VALID64(le64toh(o->entry_array.items[i]))) {
                                log_error(OFSfmt": invalid object entry array item (%"PRIu64"/%"PRIu64"): "OFSfmt,
                                          offset,
                                          i,
                                          journal_file_entry_array_n_items(o),
                                          le64toh(o->entry_array.items[i]));
                                return -EBADMSG;
                        }

                break;

        case OBJECT_TAG:
                if (le64toh(o->object.size) != sizeof(TagObject)) {
                        log_error(OFSfmt": invalid object tag size: %"PRIu64,
                                  offset,
                                  le64toh(o->object.size));
                        return -EBADMSG;
                }

                if (!VALID_EPOCH(le64toh(o->tag.epoch))) {
                        log_error(OFSfmt": invalid object tag epoch: %"PRIu64,
                                  offset,
                                  le64toh(o->tag.epoch));
                        return -EBADMSG;
                }

                break;
        }

        return 0;
}
/* Superficially verify a single journal object at 'offset': size plausibility,
 * field-value ranges, payload hash (decompressing the payload first if the
 * object is compressed). It does not follow any references to other objects.
 *
 * Fix: the on-disk fields are little-endian (le64_t). VALID64()/VALID_EPOCH()
 * and several error() format arguments were previously given the raw LE words,
 * which mis-validates and mis-prints on big-endian hosts; all such uses now go
 * through le64toh() first.
 *
 * Returns 0 if the object looks sane, -EBADMSG on a consistency error, or a
 * negative errno from decompression. */
static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o) {
        uint64_t i;

        assert(f);
        assert(offset);
        assert(o);

        /* This does various superficial tests about the length an
         * possible field values. It does not follow any references to
         * other objects. */

        if ((o->object.flags & OBJECT_COMPRESSED_XZ) &&
            o->object.type != OBJECT_DATA) {
                error(offset, "Found compressed object that isn't of type DATA, which is not allowed.");
                return -EBADMSG;
        }

        switch (o->object.type) {

        case OBJECT_DATA: {
                uint64_t h1, h2;
                int compression, r;

                if (le64toh(o->data.entry_offset) == 0)
                        warning(offset, "Unused data (entry_offset==0)");

                /* entry_offset and n_entries must be zero/non-zero together. */
                if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0)) {
                        error(offset, "Bad n_entries: %"PRIu64, le64toh(o->data.n_entries));
                        return -EBADMSG;
                }

                /* Unsigned arithmetic: "<= 0" here means "== 0", i.e. empty payload. */
                if (le64toh(o->object.size) - offsetof(DataObject, payload) <= 0) {
                        error(offset, "Bad object size (<= %zu): %"PRIu64,
                              offsetof(DataObject, payload),
                              le64toh(o->object.size));
                        return -EBADMSG;
                }

                h1 = le64toh(o->data.hash);

                compression = o->object.flags & OBJECT_COMPRESSION_MASK;
                if (compression) {
                        _cleanup_free_ void *b = NULL;
                        size_t alloc = 0, b_size;

                        r = decompress_blob(compression,
                                            o->data.payload,
                                            le64toh(o->object.size) - offsetof(Object, data.payload),
                                            &b, &alloc, &b_size, 0);
                        if (r < 0) {
                                error(offset, "%s decompression failed: %s",
                                      object_compressed_to_string(compression), strerror(-r));
                                return r;
                        }

                        /* Hash is computed over the uncompressed payload. */
                        h2 = hash64(b, b_size);
                } else
                        h2 = hash64(o->data.payload, le64toh(o->object.size) - offsetof(Object, data.payload));

                if (h1 != h2) {
                        error(offset, "Invalid hash (%08"PRIx64" vs. %08"PRIx64, h1, h2);
                        return -EBADMSG;
                }

                if (!VALID64(le64toh(o->data.next_hash_offset)) ||
                    !VALID64(le64toh(o->data.next_field_offset)) ||
                    !VALID64(le64toh(o->data.entry_offset)) ||
                    !VALID64(le64toh(o->data.entry_array_offset))) {
                        error(offset, "Invalid offset (next_hash_offset="OFSfmt", next_field_offset="OFSfmt", entry_offset="OFSfmt", entry_array_offset="OFSfmt,
                              le64toh(o->data.next_hash_offset),
                              le64toh(o->data.next_field_offset),
                              le64toh(o->data.entry_offset),
                              le64toh(o->data.entry_array_offset));
                        return -EBADMSG;
                }

                break;
        }

        case OBJECT_FIELD:
                if (le64toh(o->object.size) - offsetof(FieldObject, payload) <= 0) {
                        error(offset, "Bad field size (<= %zu): %"PRIu64,
                              offsetof(FieldObject, payload),
                              le64toh(o->object.size));
                        return -EBADMSG;
                }

                if (!VALID64(le64toh(o->field.next_hash_offset)) ||
                    !VALID64(le64toh(o->field.head_data_offset))) {
                        error(offset, "Invalid offset (next_hash_offset="OFSfmt", head_data_offset="OFSfmt,
                              le64toh(o->field.next_hash_offset),
                              le64toh(o->field.head_data_offset));
                        return -EBADMSG;
                }

                break;

        case OBJECT_ENTRY:
                /* Trailing area must be a whole, non-empty array of EntryItem. */
                if ((le64toh(o->object.size) - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0) {
                        error(offset, "Bad entry size (<= %zu): %"PRIu64,
                              offsetof(EntryObject, items),
                              le64toh(o->object.size));
                        return -EBADMSG;
                }

                if ((le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0) {
                        error(offset, "Invalid number items in entry: %"PRIu64,
                              (le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem));
                        return -EBADMSG;
                }

                if (le64toh(o->entry.seqnum) <= 0) {
                        error(offset, "Invalid entry seqnum: %"PRIx64,
                              le64toh(o->entry.seqnum));
                        return -EBADMSG;
                }

                if (!VALID_REALTIME(le64toh(o->entry.realtime))) {
                        error(offset, "Invalid entry realtime timestamp: %"PRIu64,
                              le64toh(o->entry.realtime));
                        return -EBADMSG;
                }

                if (!VALID_MONOTONIC(le64toh(o->entry.monotonic))) {
                        error(offset, "Invalid entry monotonic timestamp: %"PRIu64,
                              le64toh(o->entry.monotonic));
                        return -EBADMSG;
                }

                for (i = 0; i < journal_file_entry_n_items(o); i++) {
                        if (o->entry.items[i].object_offset == 0 ||
                            !VALID64(le64toh(o->entry.items[i].object_offset))) {
                                error(offset, "Invalid entry item (%"PRIu64"/%"PRIu64" offset: "OFSfmt,
                                      i,
                                      journal_file_entry_n_items(o),
                                      le64toh(o->entry.items[i].object_offset));
                                return -EBADMSG;
                        }
                }

                break;

        case OBJECT_DATA_HASH_TABLE:
        case OBJECT_FIELD_HASH_TABLE:
                if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 ||
                    (le64toh(o->object.size) - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0) {
                        error(offset, "Invalid %s hash table size: %"PRIu64,
                              o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
                              le64toh(o->object.size));
                        return -EBADMSG;
                }

                for (i = 0; i < journal_file_hash_table_n_items(o); i++) {
                        if (o->hash_table.items[i].head_hash_offset != 0 &&
                            !VALID64(le64toh(o->hash_table.items[i].head_hash_offset))) {
                                error(offset, "Invalid %s hash table item (%"PRIu64"/%"PRIu64") head_hash_offset: "OFSfmt,
                                      o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
                                      i,
                                      journal_file_hash_table_n_items(o),
                                      le64toh(o->hash_table.items[i].head_hash_offset));
                                return -EBADMSG;
                        }
                        if (o->hash_table.items[i].tail_hash_offset != 0 &&
                            !VALID64(le64toh(o->hash_table.items[i].tail_hash_offset))) {
                                error(offset, "Invalid %s hash table item (%"PRIu64"/%"PRIu64") tail_hash_offset: "OFSfmt,
                                      o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
                                      i,
                                      journal_file_hash_table_n_items(o),
                                      le64toh(o->hash_table.items[i].tail_hash_offset));
                                return -EBADMSG;
                        }

                        /* A bucket must have both head and tail set, or neither. */
                        if ((o->hash_table.items[i].head_hash_offset != 0) !=
                            (o->hash_table.items[i].tail_hash_offset != 0)) {
                                error(offset, "Invalid %s hash table item (%"PRIu64"/%"PRIu64"): head_hash_offset="OFSfmt" tail_hash_offset="OFSfmt,
                                      o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
                                      i,
                                      journal_file_hash_table_n_items(o),
                                      le64toh(o->hash_table.items[i].head_hash_offset),
                                      le64toh(o->hash_table.items[i].tail_hash_offset));
                                return -EBADMSG;
                        }
                }

                break;

        case OBJECT_ENTRY_ARRAY:
                if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 ||
                    (le64toh(o->object.size) - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0) {
                        error(offset, "Invalid object entry array size: %"PRIu64,
                              le64toh(o->object.size));
                        return -EBADMSG;
                }

                if (!VALID64(le64toh(o->entry_array.next_entry_array_offset))) {
                        error(offset, "Invalid object entry array next_entry_array_offset: "OFSfmt,
                              le64toh(o->entry_array.next_entry_array_offset));
                        return -EBADMSG;
                }

                for (i = 0; i < journal_file_entry_array_n_items(o); i++)
                        if (le64toh(o->entry_array.items[i]) != 0 &&
                            !VALID64(le64toh(o->entry_array.items[i]))) {
                                error(offset, "Invalid object entry array item (%"PRIu64"/%"PRIu64"): "OFSfmt,
                                      i,
                                      journal_file_entry_array_n_items(o),
                                      le64toh(o->entry_array.items[i]));
                                return -EBADMSG;
                        }

                break;

        case OBJECT_TAG:
                if (le64toh(o->object.size) != sizeof(TagObject)) {
                        error(offset, "Invalid object tag size: %"PRIu64,
                              le64toh(o->object.size));
                        return -EBADMSG;
                }

                if (!VALID_EPOCH(le64toh(o->tag.epoch))) {
                        error(offset, "Invalid object tag epoch: %"PRIu64,
                              le64toh(o->tag.epoch));
                        return -EBADMSG;
                }

                break;
        }

        return 0;
}
/* Superficially verify a single journal object: size plausibility, field-value
 * ranges, payload hash. It does not follow any references to other objects.
 *
 * Fix: the on-disk fields are little-endian (le64_t). VALID64()/VALID_EPOCH()
 * were previously applied to the raw LE words (most visibly in the
 * OBJECT_ENTRY_ARRAY item loop), which mis-validates on big-endian hosts; all
 * such uses now go through le64toh() first.
 *
 * Returns 0 if the object looks sane, -EBADMSG on a consistency error,
 * -EPROTONOSUPPORT if a compressed object is found without XZ support. */
static int journal_file_object_verify(JournalFile *f, Object *o) {
        uint64_t i;

        assert(f);
        assert(o);

        /* This does various superficial tests about the length an
         * possible field values. It does not follow any references to
         * other objects. */

        /* Only DATA objects may carry the compression flag. */
        if ((o->object.flags & OBJECT_COMPRESSED) &&
            o->object.type != OBJECT_DATA)
                return -EBADMSG;

        switch (o->object.type) {

        case OBJECT_DATA: {
                uint64_t h1, h2;

                if (le64toh(o->data.entry_offset) <= 0 ||
                    le64toh(o->data.n_entries) <= 0)
                        return -EBADMSG;

                /* Unsigned arithmetic: "<= 0" here means "== 0", i.e. empty payload. */
                if (le64toh(o->object.size) - offsetof(DataObject, payload) <= 0)
                        return -EBADMSG;

                h1 = le64toh(o->data.hash);

                if (o->object.flags & OBJECT_COMPRESSED) {
#ifdef HAVE_XZ
                        void *b = NULL;
                        uint64_t alloc = 0, b_size;

                        if (!uncompress_blob(o->data.payload,
                                             le64toh(o->object.size) - offsetof(Object, data.payload),
                                             &b, &alloc, &b_size))
                                return -EBADMSG;

                        /* Hash is computed over the uncompressed payload. */
                        h2 = hash64(b, b_size);
                        free(b);
#else
                        return -EPROTONOSUPPORT;
#endif
                } else
                        h2 = hash64(o->data.payload, le64toh(o->object.size) - offsetof(Object, data.payload));

                if (h1 != h2)
                        return -EBADMSG;

                if (!VALID64(le64toh(o->data.next_hash_offset)) ||
                    !VALID64(le64toh(o->data.next_field_offset)) ||
                    !VALID64(le64toh(o->data.entry_offset)) ||
                    !VALID64(le64toh(o->data.entry_array_offset)))
                        return -EBADMSG;

                break;
        }

        case OBJECT_FIELD:
                if (le64toh(o->object.size) - offsetof(FieldObject, payload) <= 0)
                        return -EBADMSG;

                if (!VALID64(le64toh(o->field.next_hash_offset)) ||
                    !VALID64(le64toh(o->field.head_data_offset)))
                        return -EBADMSG;

                break;

        case OBJECT_ENTRY:
                /* Trailing area must be a whole, non-empty array of EntryItem. */
                if ((le64toh(o->object.size) - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0)
                        return -EBADMSG;

                if ((le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0)
                        return -EBADMSG;

                if (le64toh(o->entry.seqnum) <= 0 ||
                    !VALID_REALTIME(le64toh(o->entry.realtime)) ||
                    !VALID_MONOTONIC(le64toh(o->entry.monotonic)))
                        return -EBADMSG;

                for (i = 0; i < journal_file_entry_n_items(o); i++) {
                        if (o->entry.items[i].object_offset == 0 ||
                            !VALID64(le64toh(o->entry.items[i].object_offset)))
                                return -EBADMSG;
                }

                break;

        case OBJECT_DATA_HASH_TABLE:
        case OBJECT_FIELD_HASH_TABLE:
                if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0)
                        return -EBADMSG;

                if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0)
                        return -EBADMSG;

                for (i = 0; i < journal_file_hash_table_n_items(o); i++) {
                        if (o->hash_table.items[i].head_hash_offset != 0 &&
                            !VALID64(le64toh(o->hash_table.items[i].head_hash_offset)))
                                return -EBADMSG;
                        if (o->hash_table.items[i].tail_hash_offset != 0 &&
                            !VALID64(le64toh(o->hash_table.items[i].tail_hash_offset)))
                                return -EBADMSG;

                        /* A bucket must have both head and tail set, or neither. */
                        if ((o->hash_table.items[i].head_hash_offset != 0) !=
                            (o->hash_table.items[i].tail_hash_offset != 0))
                                return -EBADMSG;
                }

                break;

        case OBJECT_ENTRY_ARRAY:
                if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0)
                        return -EBADMSG;

                if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0)
                        return -EBADMSG;

                if (!VALID64(le64toh(o->entry_array.next_entry_array_offset)))
                        return -EBADMSG;

                for (i = 0; i < journal_file_entry_array_n_items(o); i++)
                        if (le64toh(o->entry_array.items[i]) != 0 &&
                            !VALID64(le64toh(o->entry_array.items[i])))
                                return -EBADMSG;

                break;

        case OBJECT_TAG:
                if (le64toh(o->object.size) != sizeof(TagObject))
                        return -EBADMSG;

                if (!VALID_EPOCH(le64toh(o->tag.epoch)))
                        return -EBADMSG;

                break;
        }

        return 0;
}