/* Computes the size in bytes of the verity hash tree covering `file_size'
   bytes of data. If non-NULL, `verity_levels' receives the number of tree
   levels and `level_hashes[i]' receives the number of hashes at level i.
   We assume a known metadata size, a 4 KiB block size, and SHA-256 digests
   to avoid relying on disk content. */
uint64_t verity_get_size(uint64_t file_size, uint32_t *verity_levels,
                         uint32_t *level_hashes)
{
    uint32_t depth = 0;
    uint64_t blocks_total = 0;
    uint64_t nhashes = file_size / FEC_BLOCKSIZE;

    /* walk up the tree: each level stores one digest per block of the level
       below, until a single block suffices */
    for (;;) {
        if (level_hashes) {
            level_hashes[depth] = nhashes;
        }

        nhashes = fec_div_round_up(nhashes * SHA256_DIGEST_LENGTH,
                                   FEC_BLOCKSIZE);
        blocks_total += nhashes;
        ++depth;

        if (nhashes <= 1) {
            break;
        }
    }

    if (verity_levels) {
        *verity_levels = depth;
    }

    return blocks_total * FEC_BLOCKSIZE;
}
/* launches a maximum number of threads to process a read */ ssize_t process(fec_handle *f, uint8_t *buf, size_t count, uint64_t offset, read_func func) { check(f); check(buf) check(func); if (count == 0) { return 0; } int threads = sysconf(_SC_NPROCESSORS_ONLN); if (threads < WORK_MIN_THREADS) { threads = WORK_MIN_THREADS; } else if (threads > WORK_MAX_THREADS) { threads = WORK_MAX_THREADS; } uint64_t start = (offset / FEC_BLOCKSIZE) * FEC_BLOCKSIZE; size_t blocks = fec_div_round_up(count, FEC_BLOCKSIZE); if ((size_t)threads > blocks) { threads = (int)blocks; } size_t count_per_thread = fec_div_round_up(blocks, threads) * FEC_BLOCKSIZE; size_t left = count; uint64_t pos = offset; uint64_t end = start + count_per_thread; debug("%d threads, %zu bytes per thread (total %zu)", threads, count_per_thread, count); std::vector<pthread_t> handles; process_info info[threads]; ssize_t rc = 0; /* start threads to process queue */ for (int i = 0; i < threads; ++i) { check(left > 0); info[i].id = i; info[i].f = f; info[i].buf = &buf[pos - offset]; info[i].count = (size_t)(end - pos); info[i].offset = pos; info[i].func = func; info[i].rc = -1; info[i].errors = 0; if (info[i].count > left) { info[i].count = left; } pthread_t thread; if (pthread_create(&thread, NULL, __process, &info[i]) != 0) { error("failed to create thread: %s", strerror(errno)); rc = -1; } else { handles.push_back(thread); } pos = end; end += count_per_thread; left -= info[i].count; } check(left == 0); ssize_t nread = 0; /* wait for all threads to complete */ for (auto thread : handles) { process_info *p = NULL; if (pthread_join(thread, (void **)&p) != 0) { error("failed to join thread: %s", strerror(errno)); rc = -1; } else if (!p || p->rc == -1) { rc = -1; } else { nread += p->rc; f->errors += p->errors; } } if (rc == -1) { errno = EIO; return -1; } return nread; }
/* attempts to read and validate an ecc header from file position `offset';
   on success fills in f->data_size, f->ecc.{blocks,rounds,size,start,valid}
   and returns 0; returns -1 if no valid header is found at `offset' */
static int parse_ecc_header(fec_handle *f, uint64_t offset)
{
    check(f);
    check(f->ecc.rsn > 0 && f->ecc.rsn < FEC_RSM);
    check(f->size > sizeof(fec_header));

    debug("offset = %" PRIu64, offset);

    /* the whole header must fit inside the file */
    if (offset > f->size - sizeof(fec_header)) {
        return -1;
    }

    fec_header header;

    /* there's obviously no ecc data at this point, so there is no need to
       call fec_pread to access this data */
    /* NOTE(review): raw_pread is treated as returning non-zero on success —
       confirm against its definition elsewhere in this file */
    if (!raw_pread(f, &header, sizeof(fec_header), offset)) {
        error("failed to read: %s", strerror(errno));
        return -1;
    }

    /* move offset back to the beginning of the block for validating header */
    offset -= offset % FEC_BLOCKSIZE;

    /* validate every header field before trusting any of it */
    if (header.magic != FEC_MAGIC) {
        return -1;
    }

    if (header.version != FEC_VERSION) {
        error("unsupported ecc version: %u", header.version);
        return -1;
    }

    if (header.size != sizeof(fec_header)) {
        error("unexpected ecc header size: %u", header.size);
        return -1;
    }

    if (header.roots == 0 || header.roots >= FEC_RSM) {
        error("invalid ecc roots: %u", header.roots);
        return -1;
    }

    /* the header must agree with the roots the handle was opened with */
    if (f->ecc.roots != (int)header.roots) {
        error("unexpected number of roots: %d vs %u", f->ecc.roots,
              header.roots);
        return -1;
    }

    /* ecc data must be a whole number of blocks and divide evenly among the
       roots */
    if (header.fec_size % header.roots ||
            header.fec_size % FEC_BLOCKSIZE) {
        error("inconsistent ecc size %u", header.fec_size);
        return -1;
    }

    /* structure: data | ecc | header */
    if (offset < header.fec_size ||
            offset - header.fec_size != header.inp_size) {
        error("unexpected input size: %" PRIu64 " vs %" PRIu64, offset,
              header.inp_size);
        return -1;
    }

    f->data_size = header.inp_size;
    f->ecc.blocks = fec_div_round_up(f->data_size, FEC_BLOCKSIZE);
    f->ecc.rounds = fec_div_round_up(f->ecc.blocks, f->ecc.rsn);

    /* the declared ecc size must match what the block/round geometry
       predicts */
    if (header.fec_size !=
            (uint32_t)f->ecc.rounds * f->ecc.roots * FEC_BLOCKSIZE) {
        error("inconsistent ecc size %u", header.fec_size);
        return -1;
    }

    f->ecc.size = header.fec_size;
    /* ecc data begins immediately after the input data */
    f->ecc.start = header.inp_size;

    /* validate encoding data; caller may opt not to use it if invalid */
    SHA256_CTX ctx;
    SHA256_Init(&ctx);

    /* stream the ecc region through SHA-256 one block at a time */
    uint8_t buf[FEC_BLOCKSIZE];
    uint32_t n = 0;
    uint32_t len = FEC_BLOCKSIZE;

    while (n < f->ecc.size) {
        /* the final read may be shorter than a full block */
        if (len > f->ecc.size - n) {
            len = f->ecc.size - n;
        }

        if (!raw_pread(f, buf, len, f->ecc.start + n)) {
            error("failed to read ecc: %s", strerror(errno));
            return -1;
        }

        SHA256_Update(&ctx, buf, len);
        n += len;
    }

    uint8_t hash[SHA256_DIGEST_LENGTH];
    SHA256_Final(hash, &ctx);

    /* a digest mismatch marks the ecc data invalid but is not a hard error */
    f->ecc.valid = !memcmp(hash, header.hash, SHA256_DIGEST_LENGTH);

    if (!f->ecc.valid) {
        warn("ecc data not valid");
    }

    return 0;
}