/*
 * Destroy LUKS1 on-disk metadata: zero the header/keyslot/padding region,
 * then overwrite each individual keyslot area with random data.
 *
 * @hdr  in-memory LUKS1 header (source of payload offset and keyslot layout)
 * @ctx  crypt device context (provides the metadata device)
 *
 * Returns 0 on success, negative errno-style code on failure.
 */
int LUKS_wipe_header_areas(struct luks_phdr *hdr,
	struct crypt_device *ctx)
{
	int i, r;
	uint64_t offset, length;
	size_t wipe_block;

	/* Wipe complete header, keyslots and padding areas with zeroes. */
	offset = 0;
	length = (uint64_t)hdr->payloadOffset * SECTOR_SIZE;
	wipe_block = 1024 * 1024;

	/* On detached header or bogus header, wipe at least the first 4k */
	if (length == 0 || length > (LUKS_MAX_KEYSLOT_SIZE * LUKS_NUMKEYS)) {
		length = 4096;
		wipe_block = 4096;
	}

	log_dbg(ctx, "Wiping LUKS areas (0x%06" PRIx64 " - 0x%06" PRIx64") with zeroes.",
		offset, length + offset);

	r = crypt_wipe_device(ctx, crypt_metadata_device(ctx), CRYPT_WIPE_ZERO,
			      offset, length, wipe_block, NULL, NULL);
	if (r < 0)
		return r;

	/* Wipe keyslots areas (random data, so no key material residue remains) */
	wipe_block = 1024 * 1024;
	for (i = 0; i < LUKS_NUMKEYS; i++) {
		r = LUKS_keyslot_area(hdr, i, &offset, &length);
		if (r < 0)
			return r;

		/* Ignore too big LUKS1 keyslots here */
		if (length > LUKS_MAX_KEYSLOT_SIZE ||
		    offset > (LUKS_MAX_KEYSLOT_SIZE - length))
			continue;

		/* A keyslot overlapping the 4k header area indicates a corrupt layout. */
		if (length == 0 || offset < 4096)
			return -EINVAL;

		log_dbg(ctx, "Wiping keyslot %i area (0x%06" PRIx64 " - 0x%06" PRIx64") with random data.",
			i, offset, length + offset);

		r = crypt_wipe_device(ctx, crypt_metadata_device(ctx), CRYPT_WIPE_RANDOM,
				      offset, length, wipe_block, NULL, NULL);
		if (r < 0)
			return r;
	}

	return r;
}
static int LUKS_check_device_size(struct crypt_device *ctx, const struct luks_phdr *hdr, int falloc) { struct device *device = crypt_metadata_device(ctx); uint64_t dev_sectors, hdr_sectors; if (!hdr->keyBytes) return -EINVAL; if (device_size(device, &dev_sectors)) { log_dbg("Cannot get device size for device %s.", device_path(device)); return -EIO; } dev_sectors >>= SECTOR_SHIFT; hdr_sectors = LUKS_device_sectors(hdr); log_dbg("Key length %u, device size %" PRIu64 " sectors, header size %" PRIu64 " sectors.", hdr->keyBytes, dev_sectors, hdr_sectors); if (hdr_sectors > dev_sectors) { /* If it is header file, increase its size */ if (falloc && !device_fallocate(device, hdr_sectors << SECTOR_SHIFT)) return 0; log_err(ctx, _("Device %s is too small. (LUKS1 requires at least %" PRIu64 " bytes.)"), device_path(device), hdr_sectors * SECTOR_SIZE); return -EINVAL; } return 0; }
/*
 * Create verity hash tree for the data device and store it on the
 * metadata (hash) device; the resulting root hash is written to
 * @root_hash (@root_hash_size bytes).
 *
 * Returns 0 on success, negative errno-style code on failure.
 *
 * Fix: removed the unused local `off_t sz = verity_hdr->data_size;`
 * (dead store, its value was never read).
 */
int VERITY_create(struct crypt_device *cd,
		  struct crypt_params_verity *verity_hdr,
		  char *root_hash, size_t root_hash_size)
{
	unsigned pgsize = crypt_getpagesize();

	/* Salt is stored in a fixed 256-byte superblock field. */
	if (verity_hdr->salt_size > 256)
		return -EINVAL;

	/* Userspace can build such a tree, but the kernel will refuse to map it. */
	if (verity_hdr->data_block_size > pgsize)
		log_err(cd, "WARNING: Kernel cannot activate device if data "
			"block size exceeds page size (%u).\n", pgsize);

	return VERITY_create_or_verify_hash(cd, 0,
		verity_hdr->hash_type,
		verity_hdr->hash_name,
		crypt_metadata_device(cd),
		crypt_data_device(cd),
		verity_hdr->hash_block_size,
		verity_hdr->data_block_size,
		verity_hdr->data_size,
		VERITY_hash_offset_block(verity_hdr),
		root_hash, root_hash_size,
		verity_hdr->salt, verity_hdr->salt_size);
}
static int LUKS_check_device_size(struct crypt_device *ctx, size_t keyLength) { struct device *device = crypt_metadata_device(ctx); uint64_t dev_sectors, hdr_sectors; if (!keyLength) return -EINVAL; if(device_size(device, &dev_sectors)) { log_dbg("Cannot get device size for device %s.", device_path(device)); return -EIO; } dev_sectors >>= SECTOR_SHIFT; hdr_sectors = LUKS_device_sectors(keyLength); log_dbg("Key length %zu, device size %" PRIu64 " sectors, header size %" PRIu64 " sectors.",keyLength, dev_sectors, hdr_sectors); if (hdr_sectors > dev_sectors) { log_err(ctx, _("Device %s is too small. (LUKS requires at least %" PRIu64 " bytes.)\n"), device_path(device), hdr_sectors * SECTOR_SIZE); return -EINVAL; } return 0; }
int TCRYPT_read_phdr(struct crypt_device *cd, struct tcrypt_phdr *hdr, struct crypt_params_tcrypt *params) { struct device *base_device, *device = crypt_metadata_device(cd); ssize_t hdr_size = sizeof(struct tcrypt_phdr); char *base_device_path; int devfd = 0, r, bs; assert(sizeof(struct tcrypt_phdr) == 512); log_dbg("Reading TCRYPT header of size %zu bytes from device %s.", hdr_size, device_path(device)); bs = device_block_size(device); if (bs < 0) return bs; if (params->flags & CRYPT_TCRYPT_SYSTEM_HEADER && crypt_dev_is_partition(device_path(device))) { base_device_path = crypt_get_base_device(device_path(device)); log_dbg("Reading TCRYPT system header from device %s.", base_device_path ?: "?"); if (!base_device_path) return -EINVAL; r = device_alloc(&base_device, base_device_path); if (r < 0) return r; devfd = device_open(base_device, O_RDONLY); free(base_device_path); device_free(base_device); } else
/*
 * Serialize the in-memory LUKS1 header to the metadata device.
 *
 * The on-disk format stores multi-byte integers in network (big-endian)
 * byte order, so a converted copy is written while @hdr itself stays in
 * host byte order. After a successful write the header is re-read to
 * verify that in-memory and on-disk data match.
 *
 * Returns 0 on success, negative errno-style code on failure.
 */
int LUKS_write_phdr(struct luks_phdr *hdr,
		    struct crypt_device *ctx)
{
	struct device *device = crypt_metadata_device(ctx);
	ssize_t hdr_size = sizeof(struct luks_phdr);
	int devfd = 0;
	unsigned int i;
	struct luks_phdr convHdr;	/* byte-swapped copy written to disk */
	int r;

	log_dbg(ctx, "Updating LUKS header of size %zu on device %s",
		sizeof(struct luks_phdr), device_path(device));

	/* Grow a header file with fallocate if needed (falloc flag = 1). */
	r = LUKS_check_device_size(ctx, hdr, 1);
	if (r)
		return r;

	devfd = device_open(ctx, device, O_RDWR);
	if (devfd < 0) {
		if (errno == EACCES)
			log_err(ctx, _("Cannot write to device %s, permission denied."),
				device_path(device));
		else
			log_err(ctx, _("Cannot open device %s."), device_path(device));
		return -EINVAL;
	}

	memcpy(&convHdr, hdr, hdr_size);
	/* Never leak stack/heap residue through the padding field. */
	memset(&convHdr._padding, 0, sizeof(convHdr._padding));

	/* Convert every uint16/32_t item to network byte order */
	convHdr.version            = htons(hdr->version);
	convHdr.payloadOffset      = htonl(hdr->payloadOffset);
	convHdr.keyBytes           = htonl(hdr->keyBytes);
	convHdr.mkDigestIterations = htonl(hdr->mkDigestIterations);
	for(i = 0; i < LUKS_NUMKEYS; ++i) {
		convHdr.keyblock[i].active             = htonl(hdr->keyblock[i].active);
		convHdr.keyblock[i].passwordIterations = htonl(hdr->keyblock[i].passwordIterations);
		convHdr.keyblock[i].keyMaterialOffset  = htonl(hdr->keyblock[i].keyMaterialOffset);
		convHdr.keyblock[i].stripes            = htonl(hdr->keyblock[i].stripes);
	}

	/* Short write is treated the same as a hard I/O error. */
	r = write_lseek_blockwise(devfd, device_block_size(ctx, device),
				  device_alignment(device), &convHdr, hdr_size, 0) < hdr_size ? -EIO : 0;
	if (r)
		log_err(ctx, _("Error during update of LUKS header on device %s."),
			device_path(device));

	device_sync(ctx, device);

	/* Re-read header from disk to be sure that in-memory and on-disk data are the same. */
	if (!r) {
		r = LUKS_read_phdr(hdr, 1, 0, ctx);
		if (r)
			log_err(ctx, _("Error re-reading LUKS header after update on device %s."),
				device_path(device));
	}

	return r;
}
/*
 * Write verity superblock to disk.
 *
 * Builds the on-disk superblock (all multi-byte fields little-endian) at
 * @sb_offset on the metadata device. @uuid_string must be a parseable UUID.
 *
 * Returns 0 on success, -EINVAL on bad parameters or open failure,
 * -EIO on short/failed write.
 *
 * NOTE(review): the strncpy into sb.algorithm fills a fixed-width on-disk
 * field (zero-padded, not necessarily NUL-terminated) — that appears to be
 * the disk format contract. Also, params->salt_size is copied without a
 * local bound check against sizeof(sb.salt); presumably validated by the
 * caller — confirm.
 */
int VERITY_write_sb(struct crypt_device *cd,
		    uint64_t sb_offset,
		    const char *uuid_string,
		    struct crypt_params_verity *params)
{
	struct device *device = crypt_metadata_device(cd);
	int bsize = device_block_size(device);
	struct verity_sb sb = {};
	ssize_t hdr_size = sizeof(struct verity_sb);
	uuid_t uuid;
	int r, devfd = 0;

	log_dbg("Updating VERITY header of size %zu on device %s, offset %" PRIu64 ".",
		sizeof(struct verity_sb), device_path(device), sb_offset);

	if (!uuid_string || uuid_parse(uuid_string, uuid) == -1) {
		log_err(cd, _("Wrong VERITY UUID format provided on device %s. \n"),
			device_path(device));
		return -EINVAL;
	}

	/* Headerless format stores parameters elsewhere; refuse to write one. */
	if (params->flags & CRYPT_VERITY_NO_HEADER) {
		log_err(cd, _("Verity device %s doesn't use on-disk header.\n"),
			device_path(device));
		return -EINVAL;
	}

	devfd = device_open(device, O_RDWR);
	if(devfd == -1) {
		log_err(cd, _("Cannot open device %s.\n"), device_path(device));
		return -EINVAL;
	}

	/* Populate superblock; integers are converted to little-endian on-disk order. */
	memcpy(&sb.signature, VERITY_SIGNATURE, sizeof(sb.signature));
	sb.version         = cpu_to_le32(1);
	sb.hash_type       = cpu_to_le32(params->hash_type);
	sb.data_block_size = cpu_to_le32(params->data_block_size);
	sb.hash_block_size = cpu_to_le32(params->hash_block_size);
	sb.salt_size       = cpu_to_le16(params->salt_size);
	sb.data_blocks     = cpu_to_le64(params->data_size);
	strncpy((char *)sb.algorithm, params->hash_name, sizeof(sb.algorithm));
	memcpy(sb.salt, params->salt, params->salt_size);
	memcpy(sb.uuid, uuid, sizeof(sb.uuid));

	/* Short write is treated the same as a hard I/O error. */
	r = write_lseek_blockwise(devfd, bsize, (char*)&sb, hdr_size, sb_offset) < hdr_size ? -EIO : 0;
	if (r)
		log_err(cd, _("Error during update of verity header on device %s.\n"),
			device_path(device));
	close(devfd);

	return r;
}
/*
 * Read and validate the LUKS1 header from the metadata device into @hdr.
 *
 * @require_luks_device  fail if the device does not carry a LUKS header
 * @repair               attempt header repair (only valid together with
 *                       require_luks_device, enforced below)
 *
 * Byte-order conversion and validation happen in _check_and_convert_hdr().
 * Returns 0 on success, negative errno-style code on failure.
 */
int LUKS_read_phdr(struct luks_phdr *hdr,
		   int require_luks_device,
		   int repair,
		   struct crypt_device *ctx)
{
	struct device *device = crypt_metadata_device(ctx);
	ssize_t hdr_size = sizeof(struct luks_phdr);
	int devfd = 0, r = 0;

	/* LUKS header starts at offset 0, first keyslot on LUKS_ALIGN_KEYSLOTS */
	assert(sizeof(struct luks_phdr) <= LUKS_ALIGN_KEYSLOTS);

	/* Stripes count cannot be changed without additional code fixes yet */
	assert(LUKS_STRIPES == 4000);

	/* Repair requires a device we insist is LUKS. */
	if (repair && !require_luks_device)
		return -EINVAL;

	log_dbg("Reading LUKS header of size %zu from device %s",
		hdr_size, device_path(device));

	devfd = device_open(device, O_RDONLY);
	if (devfd < 0) {
		log_err(ctx, _("Cannot open device %s."), device_path(device));
		return -EINVAL;
	}

	if (read_blockwise(devfd, device_block_size(device), device_alignment(device),
			   hdr, hdr_size) < hdr_size)
		r = -EIO;
	else
		r = _check_and_convert_hdr(device_path(device), hdr,
					   require_luks_device, repair, ctx);

	if (!r)
		r = LUKS_check_device_size(ctx, hdr, 0);

	/*
	 * Cryptsetup 1.0.0 did not align keyslots to 4k (very rare version).
	 * Disable direct-io to avoid possible IO errors if underlying device
	 * has bigger sector size.
	 */
	if (!r && hdr->keyblock[0].keyMaterialOffset * SECTOR_SIZE < LUKS_ALIGN_KEYSLOTS) {
		log_dbg("Old unaligned LUKS keyslot detected, disabling direct-io.");
		device_disable_direct_io(device);
	}

	close(devfd);
	return r;
}
/*
 * Delete keyslot @keyIndex: mark it inactive, securely wipe the key
 * material area on disk, clear the slot's salt/iterations in the header,
 * and write the updated header back.
 *
 * Returns 0 on success, negative errno-style code on failure.
 */
int LUKS_del_key(unsigned int keyIndex,
		 struct luks_phdr *hdr,
		 struct crypt_device *ctx)
{
	struct device *device = crypt_metadata_device(ctx);
	unsigned int startOffset, endOffset;
	int r;

	/* Work on a freshly read header so we do not act on stale state. */
	r = LUKS_read_phdr(hdr, 1, 0, ctx);
	if (r)
		return r;

	r = LUKS_keyslot_set(hdr, keyIndex, 0);
	if (r) {
		log_err(ctx, _("Key slot %d is invalid, please select keyslot between 0 and %d.\n"),
			keyIndex, LUKS_NUMKEYS - 1);
		return r;
	}

	/* secure deletion of key material */
	startOffset = hdr->keyblock[keyIndex].keyMaterialOffset;
	endOffset = startOffset + AF_split_sectors(hdr->keyBytes, hdr->keyblock[keyIndex].stripes);

	r = crypt_wipe(device, startOffset * SECTOR_SIZE,
		       (endOffset - startOffset) * SECTOR_SIZE,
		       CRYPT_WIPE_DISK, 0);
	if (r) {
		if (r == -EACCES) {
			log_err(ctx, _("Cannot write to device %s, permission denied.\n"),
				device_path(device));
			r = -EINVAL;
		} else
			log_err(ctx, _("Cannot wipe device %s.\n"),
				device_path(device));
		return r;
	}

	/* Wipe keyslot info (salt and iteration count no longer needed) */
	memset(&hdr->keyblock[keyIndex].passwordSalt, 0, LUKS_SALTSIZE);
	hdr->keyblock[keyIndex].passwordIterations = 0;

	r = LUKS_write_phdr(hdr, ctx);

	return r;
}
/* Verify verity device using userspace crypto backend */ int VERITY_verify(struct crypt_device *cd, struct crypt_params_verity *verity_hdr, const char *root_hash, size_t root_hash_size) { return VERITY_create_or_verify_hash(cd, 1, verity_hdr->hash_type, verity_hdr->hash_name, crypt_metadata_device(cd), crypt_data_device(cd), verity_hdr->hash_block_size, verity_hdr->data_block_size, verity_hdr->data_size, VERITY_hash_offset_block(verity_hdr), CONST_CAST(char*)root_hash, root_hash_size, verity_hdr->salt, verity_hdr->salt_size); }
/*
 * Legacy variant: read and validate the LUKS1 header from the metadata
 * device into @hdr (no alignment workaround, key-length based size check).
 *
 * @require_luks_device  fail if the device does not carry a LUKS header
 * @repair               attempt header repair (requires require_luks_device)
 *
 * Returns 0 on success, negative errno-style code on failure.
 */
int LUKS_read_phdr(struct luks_phdr *hdr,
		   int require_luks_device,
		   int repair,
		   struct crypt_device *ctx)
{
	struct device *device = crypt_metadata_device(ctx);
	ssize_t hdr_size = sizeof(struct luks_phdr);
	int devfd = 0, r = 0;

	/* LUKS header starts at offset 0, first keyslot on LUKS_ALIGN_KEYSLOTS */
	assert(sizeof(struct luks_phdr) <= LUKS_ALIGN_KEYSLOTS);

	/* Stripes count cannot be changed without additional code fixes yet */
	assert(LUKS_STRIPES == 4000);

	/* Repair only makes sense when we insist the device is LUKS. */
	if (repair && !require_luks_device)
		return -EINVAL;

	log_dbg("Reading LUKS header of size %zu from device %s",
		hdr_size, device_path(device));

	devfd = device_open(device, O_RDONLY);
	if (devfd == -1) {
		log_err(ctx, _("Cannot open device %s.\n"), device_path(device));
		return -EINVAL;
	}

	if (read_blockwise(devfd, device_block_size(device), hdr, hdr_size) < hdr_size)
		r = -EIO;
	else
		r = _check_and_convert_hdr(device_path(device), hdr,
					   require_luks_device, repair, ctx);

	if (!r)
		r = LUKS_check_device_size(ctx, hdr->keyBytes);

	close(devfd);
	return r;
}
/*
 * Peek at the LUKS2 header version without taking the device lock.
 *
 * Reads only the magic + version prefix of the binary header, either from
 * the crypt device's metadata device or from @backup_file when given.
 *
 * Returns the header version (>0) when the LUKS2 magic matches,
 * 0 on any failure or when the device is not LUKS2.
 */
int LUKS2_hdr_version_unlocked(struct crypt_device *cd,
	const char *backup_file)
{
	/* Minimal on-disk prefix: magic immediately followed by BE version. */
	struct {
		char magic[LUKS2_MAGIC_L];
		uint16_t version;
	} __attribute__ ((packed)) hdr;
	struct device *device = NULL;
	int r = 0, devfd = -1, flags;

	if (!backup_file)
		device = crypt_metadata_device(cd);
	else if (device_alloc(&device, backup_file) < 0)
		return 0;

	if (!device)
		return 0;

	/* Honor the device's direct-io preference for the raw read. */
	flags = O_RDONLY;
	if (device_direct_io(device))
		flags |= O_DIRECT;

	devfd = open(device_path(device), flags);
	if (devfd < 0)
		goto err;

	if ((read_lseek_blockwise(devfd, device_block_size(device),
	     device_alignment(device), &hdr, sizeof(hdr), 0) == sizeof(hdr)) &&
	    !memcmp(hdr.magic, LUKS2_MAGIC_1ST, LUKS2_MAGIC_L))
		r = (int)be16_to_cpu(hdr.version);
err:
	if (devfd != -1)
		close(devfd);

	/* Only free the device we allocated ourselves for the backup file. */
	if (backup_file)
		device_free(device);

	return r;
}
/*
 * Convert in-memory LUKS2 header and write it to disk.
 * This will increase sequence id, write both header copies and calculate checksum.
 *
 * Returns 0 on success; -EINVAL for unsupported version/size or oversized
 * JSON, -ENOMEM on allocation/serialization failure, or the error from
 * device locking / hdr_write_disk().
 */
int LUKS2_disk_hdr_write(struct crypt_device *cd, struct luks2_hdr *hdr, struct device *device)
{
	char *json_area;
	const char *json_text;
	size_t json_area_len;
	int r;

	if (hdr->version != 2) {
		log_dbg("Unsupported LUKS2 header version (%u).", hdr->version);
		return -EINVAL;
	}

	if (hdr->hdr_size != LUKS2_HDR_16K_LEN) {
		log_dbg("Unsupported LUKS2 header size (%zu).", hdr->hdr_size);
		return -EINVAL;
	}

	r = LUKS2_check_device_size(cd, crypt_metadata_device(cd),
				    LUKS2_hdr_and_areas_size(hdr->jobj), 1);
	if (r)
		return r;

	/*
	 * Allocate and zero JSON area (of proper header size).
	 */
	json_area_len = hdr->hdr_size - LUKS2_HDR_BIN_LEN;
	json_area = malloc(json_area_len);
	if (!json_area)
		return -ENOMEM;
	memset(json_area, 0, json_area_len);

	/*
	 * Generate text space-efficient JSON representation to json area.
	 */
	json_text = json_object_to_json_string_ext(hdr->jobj,
			JSON_C_TO_STRING_PLAIN | JSON_C_TO_STRING_NOSLASHESCAPE);
	if (!json_text || !*json_text) {
		log_dbg("Cannot parse JSON object to text representation.");
		free(json_area);
		return -ENOMEM;
	}
	/* Must leave at least one NUL byte at the end of the area. */
	if (strlen(json_text) > (json_area_len - 1)) {
		log_dbg("JSON is too large (%zu > %zu).", strlen(json_text), json_area_len);
		free(json_area);
		return -EINVAL;
	}
	/* Area was pre-zeroed and length checked above, so this is bounded. */
	strncpy(json_area, json_text, json_area_len);

	/* Increase sequence id before writing it to disk. */
	hdr->seqid++;

	r = device_write_lock(cd, device);
	if (r) {
		log_err(cd, _("Failed to acquire write device lock."));
		free(json_area);
		return r;
	}

	/* Write primary and secondary header */
	r = hdr_write_disk(device, hdr, json_area, 0);
	if (!r)
		r = hdr_write_disk(device, hdr, json_area, 1);

	if (r)
		log_dbg("LUKS2 header write failed (%d).", r);

	device_write_unlock(device);

	/* FIXME: try recovery here? */

	free(json_area);
	return r;
}
/* Activate verity device in kernel device-mapper */ int VERITY_activate(struct crypt_device *cd, const char *name, const char *root_hash, size_t root_hash_size, struct crypt_params_verity *verity_hdr, uint32_t activation_flags) { struct crypt_dm_active_device dmd; int r; log_dbg("Trying to activate VERITY device %s using hash %s.", name ?: "[none]", verity_hdr->hash_name); if (verity_hdr->flags & CRYPT_VERITY_CHECK_HASH) { log_dbg("Verification of data in userspace required."); r = VERITY_verify(cd, verity_hdr, root_hash, root_hash_size); if (r < 0) return r; } if (!name) return 0; dmd.target = DM_VERITY; dmd.data_device = crypt_data_device(cd); dmd.u.verity.hash_device = crypt_metadata_device(cd); dmd.u.verity.root_hash = root_hash; dmd.u.verity.root_hash_size = root_hash_size; dmd.u.verity.hash_offset = VERITY_hash_offset_block(verity_hdr), dmd.flags = activation_flags; dmd.size = verity_hdr->data_size * verity_hdr->data_block_size / 512; dmd.uuid = crypt_get_uuid(cd); dmd.u.verity.vp = verity_hdr; r = device_block_adjust(cd, dmd.u.verity.hash_device, DEV_OK, 0, NULL, NULL); if (r) return r; r = device_block_adjust(cd, dmd.data_device, DEV_EXCL, 0, &dmd.size, &dmd.flags); if (r) return r; r = dm_create_device(cd, name, CRYPT_VERITY, &dmd, 0); if (r < 0 && !(dm_flags() & DM_VERITY_SUPPORTED)) { log_err(cd, _("Kernel doesn't support dm-verity mapping.\n")); return -ENOTSUP; } if (r < 0) return r; r = dm_status_verity_ok(cd, name); if (r < 0) return r; if (!r) log_err(cd, _("Verity device detected corruption after activation.\n")); return 0; }
int LUKS_hdr_restore( const char *backup_file, struct luks_phdr *hdr, struct crypt_device *ctx) { struct device *device = crypt_metadata_device(ctx); int r = 0, devfd = -1, diff_uuid = 0; ssize_t buffer_size = 0; char *buffer = NULL, msg[200]; struct luks_phdr hdr_file; r = LUKS_read_phdr_backup(backup_file, &hdr_file, 0, ctx); if (r == -ENOENT) return r; if (!r) buffer_size = LUKS_device_sectors(hdr_file.keyBytes) << SECTOR_SHIFT; if (r || buffer_size < LUKS_ALIGN_KEYSLOTS) { log_err(ctx, _("Backup file doesn't contain valid LUKS header.\n")); r = -EINVAL; goto out; } buffer = crypt_safe_alloc(buffer_size); if (!buffer) { r = -ENOMEM; goto out; } devfd = open(backup_file, O_RDONLY); if (devfd == -1) { log_err(ctx, _("Cannot open header backup file %s.\n"), backup_file); r = -EINVAL; goto out; } if (read(devfd, buffer, buffer_size) < buffer_size) { log_err(ctx, _("Cannot read header backup file %s.\n"), backup_file); r = -EIO; goto out; } close(devfd); r = LUKS_read_phdr(hdr, 0, 0, ctx); if (r == 0) { log_dbg("Device %s already contains LUKS header, checking UUID and offset.", device_path(device)); if(hdr->payloadOffset != hdr_file.payloadOffset || hdr->keyBytes != hdr_file.keyBytes) { log_err(ctx, _("Data offset or key size differs on device and backup, restore failed.\n")); r = -EINVAL; goto out; } if (memcmp(hdr->uuid, hdr_file.uuid, UUID_STRING_L)) diff_uuid = 1; } if (snprintf(msg, sizeof(msg), _("Device %s %s%s"), device_path(device), r ? _("does not contain LUKS header. Replacing header can destroy data on that device.") : _("already contains LUKS header. Replacing header will destroy existing keyslots."), diff_uuid ? 
_("\nWARNING: real device header has different UUID than backup!") : "") < 0) { r = -ENOMEM; goto out; } if (!crypt_confirm(ctx, msg)) { r = -EINVAL; goto out; } log_dbg("Storing backup of header (%zu bytes) and keyslot area (%zu bytes) to device %s.", sizeof(*hdr), buffer_size - LUKS_ALIGN_KEYSLOTS, device_path(device)); devfd = device_open(device, O_RDWR); if (devfd == -1) { if (errno == EACCES) log_err(ctx, _("Cannot write to device %s, permission denied.\n"), device_path(device)); else log_err(ctx, _("Cannot open device %s.\n"), device_path(device)); r = -EINVAL; goto out; } if (write_blockwise(devfd, device_block_size(device), buffer, buffer_size) < buffer_size) { r = -EIO; goto out; } close(devfd); /* Be sure to reload new data */ r = LUKS_read_phdr(hdr, 1, 0, ctx); out: if (devfd != -1) close(devfd); crypt_safe_free(buffer); return r; }
int LUKS_hdr_backup(const char *backup_file, struct crypt_device *ctx) { struct device *device = crypt_metadata_device(ctx); struct luks_phdr hdr; int r = 0, devfd = -1; ssize_t hdr_size; ssize_t buffer_size; char *buffer = NULL; r = LUKS_read_phdr(&hdr, 1, 0, ctx); if (r) return r; hdr_size = LUKS_device_sectors(hdr.keyBytes) << SECTOR_SHIFT; buffer_size = size_round_up(hdr_size, crypt_getpagesize()); buffer = crypt_safe_alloc(buffer_size); if (!buffer || hdr_size < LUKS_ALIGN_KEYSLOTS || hdr_size > buffer_size) { r = -ENOMEM; goto out; } log_dbg("Storing backup of header (%zu bytes) and keyslot area (%zu bytes).", sizeof(hdr), hdr_size - LUKS_ALIGN_KEYSLOTS); log_dbg("Output backup file size: %zu bytes.", buffer_size); devfd = device_open(device, O_RDONLY); if(devfd == -1) { log_err(ctx, _("Device %s is not a valid LUKS device.\n"), device_path(device)); r = -EINVAL; goto out; } if (read_blockwise(devfd, device_block_size(device), buffer, hdr_size) < hdr_size) { r = -EIO; goto out; } close(devfd); /* Wipe unused area, so backup cannot contain old signatures */ if (hdr.keyblock[0].keyMaterialOffset * SECTOR_SIZE == LUKS_ALIGN_KEYSLOTS) memset(buffer + sizeof(hdr), 0, LUKS_ALIGN_KEYSLOTS - sizeof(hdr)); devfd = open(backup_file, O_CREAT|O_EXCL|O_WRONLY, S_IRUSR); if (devfd == -1) { if (errno == EEXIST) log_err(ctx, _("Requested header backup file %s already exists.\n"), backup_file); else log_err(ctx, _("Cannot create header backup file %s.\n"), backup_file); r = -EINVAL; goto out; } if (write(devfd, buffer, buffer_size) < buffer_size) { log_err(ctx, _("Cannot write header backup file %s.\n"), backup_file); r = -EIO; goto out; } close(devfd); r = 0; out: if (devfd != -1) close(devfd); crypt_memzero(&hdr, sizeof(hdr)); crypt_safe_free(buffer); return r; }
/*
 * Read verity superblock from disk.
 *
 * Reads and validates the superblock at @sb_offset on the metadata device,
 * fills @params (hash_name and salt are heap-allocated; caller owns them)
 * and returns the stored UUID via @uuid_string (heap-allocated, may be
 * left unset if its allocation fails — callers should tolerate that).
 *
 * Returns 0 on success, -EINVAL on validation failure, -EIO on read
 * failure, -ENOMEM on allocation failure.
 */
int VERITY_read_sb(struct crypt_device *cd,
		   uint64_t sb_offset,
		   char **uuid_string,
		   struct crypt_params_verity *params)
{
	struct device *device = crypt_metadata_device(cd);
	int bsize = device_block_size(device);
	struct verity_sb sb = {};
	ssize_t hdr_size = sizeof(struct verity_sb);
	int devfd = 0, sb_version;

	log_dbg("Reading VERITY header of size %zu on device %s, offset %" PRIu64 ".",
		sizeof(struct verity_sb), device_path(device), sb_offset);

	/* Headerless format has no superblock to read. */
	if (params->flags & CRYPT_VERITY_NO_HEADER) {
		log_err(cd, _("Verity device %s doesn't use on-disk header.\n"),
			device_path(device));
		return -EINVAL;
	}

	if (sb_offset % 512) {
		log_err(cd, _("Unsupported VERITY hash offset.\n"));
		return -EINVAL;
	}

	devfd = device_open(device, O_RDONLY);
	if(devfd == -1) {
		log_err(cd, _("Cannot open device %s.\n"), device_path(device));
		return -EINVAL;
	}

	if(lseek(devfd, sb_offset, SEEK_SET) < 0 ||
	   read_blockwise(devfd, bsize, &sb, hdr_size) < hdr_size) {
		close(devfd);
		return -EIO;
	}
	close(devfd);

	if (memcmp(sb.signature, VERITY_SIGNATURE, sizeof(sb.signature))) {
		log_err(cd, _("Device %s is not a valid VERITY device.\n"),
			device_path(device));
		return -EINVAL;
	}

	/* On-disk integers are little-endian; convert before validation. */
	sb_version = le32_to_cpu(sb.version);
	if (sb_version != 1) {
		log_err(cd, _("Unsupported VERITY version %d.\n"), sb_version);
		return -EINVAL;
	}
	params->hash_type = le32_to_cpu(sb.hash_type);
	if (params->hash_type > VERITY_MAX_HASH_TYPE) {
		log_err(cd, _("Unsupported VERITY hash type %d.\n"), params->hash_type);
		return -EINVAL;
	}

	params->data_block_size = le32_to_cpu(sb.data_block_size);
	params->hash_block_size = le32_to_cpu(sb.hash_block_size);
	/*
	 * NOTE(review): despite its name, VERITY_BLOCK_SIZE_OK is used here
	 * as "block size NOT acceptable" (truth triggers the error path) —
	 * confirm the macro's definition.
	 */
	if (VERITY_BLOCK_SIZE_OK(params->data_block_size) ||
	    VERITY_BLOCK_SIZE_OK(params->hash_block_size)) {
		log_err(cd, _("Unsupported VERITY block size.\n"));
		return -EINVAL;
	}
	params->data_size = le64_to_cpu(sb.data_blocks);

	/* Algorithm name is a fixed-width field; bound the copy accordingly. */
	params->hash_name = strndup((const char*)sb.algorithm, sizeof(sb.algorithm));
	if (!params->hash_name)
		return -ENOMEM;
	if (crypt_hash_size(params->hash_name) <= 0) {
		log_err(cd, _("Hash algorithm %s not supported.\n"),
			params->hash_name);
		free(CONST_CAST(char*)params->hash_name);
		return -EINVAL;
	}

	params->salt_size = le16_to_cpu(sb.salt_size);
	if (params->salt_size > sizeof(sb.salt)) {
		log_err(cd, _("VERITY header corrupted.\n"));
		free(CONST_CAST(char*)params->hash_name);
		return -EINVAL;
	}
	params->salt = malloc(params->salt_size);
	if (!params->salt) {
		free(CONST_CAST(char*)params->hash_name);
		return -ENOMEM;
	}
	memcpy(CONST_CAST(char*)params->salt, sb.salt, params->salt_size);

	/* 40 bytes is enough for the 36-char textual UUID plus terminator. */
	if ((*uuid_string = malloc(40)))
		uuid_unparse(sb.uuid, *uuid_string);

	params->hash_area_offset = sb_offset;
	return 0;
}