int crypt_benchmark(struct crypt_device *cd, const char *cipher, const char *cipher_mode, size_t volume_key_size, size_t iv_size, size_t buffer_size, double *encryption_mbs, double *decryption_mbs) { struct cipher_perf cp = { .key_length = volume_key_size, .iv_length = iv_size, .buffer_size = buffer_size, }; char *c; int r; if (!cipher || !cipher_mode || !volume_key_size) return -EINVAL; r = init_crypto(cd); if (r < 0) return r; r = -ENOMEM; if (iv_size) { cp.iv = malloc(iv_size); if (!cp.iv) goto out; crypt_random_get(cd, cp.iv, iv_size, CRYPT_RND_NORMAL); } cp.key = malloc(volume_key_size); if (!cp.key) goto out; crypt_random_get(cd, cp.key, volume_key_size, CRYPT_RND_NORMAL); strncpy(cp.name, cipher, sizeof(cp.name)-1); strncpy(cp.mode, cipher_mode, sizeof(cp.mode)-1); /* Ignore IV generator */ if ((c = strchr(cp.mode, '-'))) *c = '\0'; r = cipher_perf(&cp, encryption_mbs, decryption_mbs); out: free(cp.key); free(cp.iv); return r; }
/*
 * Anti-forensic information splitter.
 *
 * Expands @src (blocksize bytes) into @dst, which must hold
 * blocksize * blocknumbers bytes: the first blocknumbers-1 blocks are
 * random; each is XORed and diffused into a running buffer so that the
 * final block, src XOR bufblock, can only be recovered if ALL blocks
 * are intact (see LUKS AF-splitter design).
 *
 * Returns 0 on success, negative errno on failure.
 */
int AF_split(char *src, char *dst, size_t blocksize,
	     unsigned int blocknumbers, const char *hash)
{
	unsigned int i;
	char *bufblock;
	int r = -EINVAL;

	/* FIX: with blocknumbers == 0 the loop bound blocknumbers-1 would
	 * wrap to UINT_MAX and write far past the end of dst. */
	if (!blocknumbers)
		return -EINVAL;

	if ((bufblock = calloc(blocksize, 1)) == NULL)
		return -ENOMEM;

	/* process everything except the last block */
	for (i = 0; i < blocknumbers - 1; i++) {
		r = crypt_random_get(NULL, dst + (blocksize * i), blocksize, CRYPT_RND_NORMAL);
		if (r < 0)
			goto out;

		XORblock(dst + (blocksize * i), bufblock, bufblock, blocksize);
		/* FIX: on diffuse() failure r still held 0 from the preceding
		 * successful crypt_random_get(), so the error was reported as
		 * success; set an explicit error code. */
		if (diffuse(bufblock, bufblock, blocksize, hash)) {
			r = -EINVAL;
			goto out;
		}
	}
	/* the last block is computed */
	XORblock(src, bufblock, dst + (i * blocksize), blocksize);
	r = 0;
out:
	free(bufblock);
	return r;
}
/* Check that kernel supports requested cipher by decryption of one sector */ int LUKS_check_cipher(struct crypt_device *ctx, size_t keylength, const char *cipher, const char *cipher_mode) { int r; struct volume_key *empty_key; char buf[SECTOR_SIZE]; log_dbg("Checking if cipher %s-%s is usable.", cipher, cipher_mode); empty_key = crypt_alloc_volume_key(keylength, NULL); if (!empty_key) return -ENOMEM; /* No need to get KEY quality random but it must avoid known weak keys. */ r = crypt_random_get(ctx, empty_key->key, empty_key->keylength, CRYPT_RND_NORMAL); if (!r) r = LUKS_decrypt_from_storage(buf, sizeof(buf), cipher, cipher_mode, empty_key, 0, ctx); crypt_free_volume_key(empty_key); crypt_memzero(buf, sizeof(buf)); return r; }
/*
 * Write a passphrase-protected copy of the volume key into keyslot keyIndex.
 *
 * Steps: benchmark PBKDF2 to size the iteration count, derive a slot key
 * from @password, AF-split the volume key (@vk) into stripes, encrypt the
 * split material to the keyslot area on disk and finally mark the slot
 * active and rewrite the header.
 *
 * @iteration_time_ms  requested PBKDF2 unlock time budget
 * @PBKDF2_per_sec     in/out cache of benchmarked iterations per second
 *
 * Returns 0 on success, negative errno on failure; the slot stays
 * disabled unless every step (including the header write) succeeded.
 */
int LUKS_set_key(unsigned int keyIndex,
	const char *password, size_t passwordLen,
	struct luks_phdr *hdr, struct volume_key *vk,
	uint32_t iteration_time_ms,
	uint64_t *PBKDF2_per_sec,
	struct crypt_device *ctx)
{
	struct volume_key *derived_key;
	char *AfKey = NULL;
	size_t AFEKSize;
	uint64_t PBKDF2_temp;
	int r;

	/* Refuse to overwrite a slot that still holds key material. */
	if(hdr->keyblock[keyIndex].active != LUKS_KEY_DISABLED) {
		log_err(ctx, _("Key slot %d active, purge first.\n"), keyIndex);
		return -EINVAL;
	}

	/* LUKS keyslot has always at least 4000 stripes according to specification */
	if(hdr->keyblock[keyIndex].stripes < 4000) {
		log_err(ctx, _("Key slot %d material includes too few stripes. Header manipulation?\n"),
			keyIndex);
		return -EINVAL;
	}

	log_dbg("Calculating data for key slot %d", keyIndex);

	/* Measure PBKDF2 speed for this hash so the iteration count below
	 * approximates the requested unlock time. */
	r = crypt_benchmark_kdf(ctx, "pbkdf2", hdr->hashSpec,
				"foo", 3, "bar", 3, PBKDF2_per_sec);
	if (r < 0) {
		log_err(ctx, _("Not compatible PBKDF2 options (using hash algorithm %s).\n"),
			hdr->hashSpec);
		return r;
	}

	/*
	 * Avoid floating point operation
	 * Final iteration count is at least LUKS_SLOT_ITERATIONS_MIN
	 */
	PBKDF2_temp = (*PBKDF2_per_sec / 2) * (uint64_t)iteration_time_ms;
	PBKDF2_temp /= 1024; /* ms -> s scaling, rounded via power of two */
	if (PBKDF2_temp > UINT32_MAX)
		PBKDF2_temp = UINT32_MAX;
	hdr->keyblock[keyIndex].passwordIterations = at_least((uint32_t)PBKDF2_temp,
							      LUKS_SLOT_ITERATIONS_MIN);

	log_dbg("Key slot %d use %" PRIu32 " password iterations.",
		keyIndex, hdr->keyblock[keyIndex].passwordIterations);

	derived_key = crypt_alloc_volume_key(hdr->keyBytes, NULL);
	if (!derived_key)
		return -ENOMEM;

	/* Fresh salt per slot; salt-quality randomness is sufficient. */
	r = crypt_random_get(ctx, hdr->keyblock[keyIndex].passwordSalt,
			     LUKS_SALTSIZE, CRYPT_RND_SALT);
	if (r < 0)
		goto out;

	/* Derive the slot key that encrypts the AF-split volume key. */
	r = crypt_pbkdf("pbkdf2", hdr->hashSpec, password, passwordLen,
			hdr->keyblock[keyIndex].passwordSalt, LUKS_SALTSIZE,
			derived_key->key, hdr->keyBytes,
			hdr->keyblock[keyIndex].passwordIterations);
	if (r < 0)
		goto out;

	/*
	 * AF splitting, the masterkey stored in vk->key is split to AfKey
	 */
	assert(vk->keylength == hdr->keyBytes);
	AFEKSize = AF_split_sectors(vk->keylength,
				    hdr->keyblock[keyIndex].stripes) * SECTOR_SIZE;
	AfKey = crypt_safe_alloc(AFEKSize);
	if (!AfKey) {
		r = -ENOMEM;
		goto out;
	}

	log_dbg("Using hash %s for AF in key slot %d, %d stripes",
		hdr->hashSpec, keyIndex, hdr->keyblock[keyIndex].stripes);
	r = AF_split(vk->key,AfKey,vk->keylength,hdr->keyblock[keyIndex].stripes,hdr->hashSpec);
	if (r < 0)
		goto out;

	log_dbg("Updating key slot %d [0x%04x] area.", keyIndex,
		hdr->keyblock[keyIndex].keyMaterialOffset << 9);
	/* Encryption via dm */
	r = LUKS_encrypt_to_storage(AfKey,
				    AFEKSize,
				    hdr->cipherName, hdr->cipherMode,
				    derived_key,
				    hdr->keyblock[keyIndex].keyMaterialOffset,
				    ctx);
	if (r < 0)
		goto out;

	/* Mark the key as active in phdr */
	r = LUKS_keyslot_set(hdr, (int)keyIndex, 1);
	if (r < 0)
		goto out;

	r = LUKS_write_phdr(hdr, ctx);
	if (r < 0)
		goto out;

	r = 0;
out:
	crypt_safe_free(AfKey);
	crypt_free_volume_key(derived_key);
	return r;
}
int LUKS_generate_phdr(struct luks_phdr *header, const struct volume_key *vk, const char *cipherName, const char *cipherMode, const char *hashSpec, const char *uuid, unsigned int stripes, unsigned int alignPayload, unsigned int alignOffset, uint32_t iteration_time_ms, uint64_t *PBKDF2_per_sec, int detached_metadata_device, struct crypt_device *ctx) { unsigned int i = 0, hdr_sectors = LUKS_device_sectors(vk->keylength); size_t blocksPerStripeSet, currentSector; int r; uuid_t partitionUuid; char luksMagic[] = LUKS_MAGIC; /* For separate metadata device allow zero alignment */ if (alignPayload == 0 && !detached_metadata_device) alignPayload = DEFAULT_DISK_ALIGNMENT / SECTOR_SIZE; if (alignPayload && detached_metadata_device && alignPayload < hdr_sectors) { log_err(ctx, _("Data offset for detached LUKS header must be " "either 0 or higher than header size (%d sectors).\n"), hdr_sectors); return -EINVAL; } if (crypt_hmac_size(hashSpec) < LUKS_DIGESTSIZE) { log_err(ctx, _("Requested LUKS hash %s is not supported.\n"), hashSpec); return -EINVAL; } if (uuid && uuid_parse(uuid, partitionUuid) == -1) { log_err(ctx, _("Wrong LUKS UUID format provided.\n")); return -EINVAL; } if (!uuid) uuid_generate(partitionUuid); memset(header,0,sizeof(struct luks_phdr)); /* Set Magic */ memcpy(header->magic,luksMagic,LUKS_MAGIC_L); header->version=1; strncpy(header->cipherName,cipherName,LUKS_CIPHERNAME_L); strncpy(header->cipherMode,cipherMode,LUKS_CIPHERMODE_L); strncpy(header->hashSpec,hashSpec,LUKS_HASHSPEC_L); header->keyBytes=vk->keylength; LUKS_fix_header_compatible(header); r = LUKS_check_cipher(header, ctx); if (r < 0) return r; log_dbg("Generating LUKS header version %d using hash %s, %s, %s, MK %d bytes", header->version, header->hashSpec ,header->cipherName, header->cipherMode, header->keyBytes); r = crypt_random_get(ctx, header->mkDigestSalt, LUKS_SALTSIZE, CRYPT_RND_SALT); if(r < 0) { log_err(ctx, _("Cannot create LUKS header: reading random salt failed.\n")); return r; } r = 
crypt_benchmark_kdf(ctx, "pbkdf2", header->hashSpec, "foo", 3, "bar", 3, PBKDF2_per_sec); if (r < 0) { log_err(ctx, _("Not compatible PBKDF2 options (using hash algorithm %s).\n"), header->hashSpec); return r; } /* Compute master key digest */ iteration_time_ms /= 8; header->mkDigestIterations = at_least((uint32_t)(*PBKDF2_per_sec/1024) * iteration_time_ms, LUKS_MKD_ITERATIONS_MIN); r = crypt_pbkdf("pbkdf2", header->hashSpec, vk->key,vk->keylength, header->mkDigestSalt, LUKS_SALTSIZE, header->mkDigest,LUKS_DIGESTSIZE, header->mkDigestIterations); if(r < 0) { log_err(ctx, _("Cannot create LUKS header: header digest failed (using hash %s).\n"), header->hashSpec); return r; } currentSector = LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE; blocksPerStripeSet = AF_split_sectors(vk->keylength, stripes); for(i = 0; i < LUKS_NUMKEYS; ++i) { header->keyblock[i].active = LUKS_KEY_DISABLED; header->keyblock[i].keyMaterialOffset = currentSector; header->keyblock[i].stripes = stripes; currentSector = size_round_up(currentSector + blocksPerStripeSet, LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE); } if (detached_metadata_device) { /* for separate metadata device use alignPayload directly */ header->payloadOffset = alignPayload; } else { /* alignOffset - offset from natural device alignment provided by topology info */ currentSector = size_round_up(currentSector, alignPayload); header->payloadOffset = currentSector + alignOffset; } uuid_unparse(partitionUuid, header->uuid); log_dbg("Data offset %d, UUID %s, digest iterations %" PRIu32, header->payloadOffset, header->uuid, header->mkDigestIterations); return 0; }
/*
 * Write a passphrase-protected copy of the volume key into keyslot keyIndex
 * (newer variant: iteration count comes from the context's PBKDF settings).
 *
 * Steps: benchmark the configured PBKDF, derive a slot key from @password
 * via PBKDF2, AF-split the volume key (@vk) into stripes, encrypt the split
 * material to the keyslot area on disk, then mark the slot active and
 * rewrite the header.
 *
 * Returns 0 on success, negative errno on failure; the slot stays
 * disabled unless every step (including the header write) succeeded.
 */
int LUKS_set_key(unsigned int keyIndex,
	const char *password, size_t passwordLen,
	struct luks_phdr *hdr, struct volume_key *vk,
	struct crypt_device *ctx)
{
	struct volume_key *derived_key;
	char *AfKey = NULL;
	size_t AFEKSize;
	struct crypt_pbkdf_type *pbkdf;
	int r;

	/* Refuse to overwrite a slot that still holds key material. */
	if(hdr->keyblock[keyIndex].active != LUKS_KEY_DISABLED) {
		log_err(ctx, _("Key slot %d active, purge first."), keyIndex);
		return -EINVAL;
	}

	/* LUKS keyslot has always at least 4000 stripes according to specification */
	if(hdr->keyblock[keyIndex].stripes < 4000) {
		log_err(ctx, _("Key slot %d material includes too few stripes. Header manipulation?"),
			keyIndex);
		return -EINVAL;
	}

	log_dbg("Calculating data for key slot %d", keyIndex);

	/* Benchmark fills in pbkdf->iterations for the requested unlock time. */
	pbkdf = crypt_get_pbkdf(ctx);
	r = crypt_benchmark_pbkdf_internal(ctx, pbkdf, vk->keylength);
	if (r < 0)
		return r;
	assert(pbkdf->iterations);

	/*
	 * Final iteration count is at least LUKS_SLOT_ITERATIONS_MIN
	 */
	hdr->keyblock[keyIndex].passwordIterations =
		at_least(pbkdf->iterations, LUKS_SLOT_ITERATIONS_MIN);
	log_dbg("Key slot %d use %" PRIu32 " password iterations.",
		keyIndex, hdr->keyblock[keyIndex].passwordIterations);

	derived_key = crypt_alloc_volume_key(hdr->keyBytes, NULL);
	if (!derived_key)
		return -ENOMEM;

	/* Fresh salt per slot; salt-quality randomness is sufficient. */
	r = crypt_random_get(ctx, hdr->keyblock[keyIndex].passwordSalt,
			     LUKS_SALTSIZE, CRYPT_RND_SALT);
	if (r < 0)
		goto out;

	/* Derive the slot key that encrypts the AF-split volume key.
	 * LUKS1 keyslots always use PBKDF2 regardless of context defaults. */
	r = crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, password, passwordLen,
			hdr->keyblock[keyIndex].passwordSalt, LUKS_SALTSIZE,
			derived_key->key, hdr->keyBytes,
			hdr->keyblock[keyIndex].passwordIterations, 0, 0);
	if (r < 0)
		goto out;

	/*
	 * AF splitting, the masterkey stored in vk->key is split to AfKey
	 */
	assert(vk->keylength == hdr->keyBytes);
	AFEKSize = AF_split_sectors(vk->keylength,
				    hdr->keyblock[keyIndex].stripes) * SECTOR_SIZE;
	AfKey = crypt_safe_alloc(AFEKSize);
	if (!AfKey) {
		r = -ENOMEM;
		goto out;
	}

	log_dbg("Using hash %s for AF in key slot %d, %d stripes",
		hdr->hashSpec, keyIndex, hdr->keyblock[keyIndex].stripes);
	r = AF_split(vk->key,AfKey,vk->keylength,hdr->keyblock[keyIndex].stripes,hdr->hashSpec);
	if (r < 0)
		goto out;

	log_dbg("Updating key slot %d [0x%04x] area.", keyIndex,
		hdr->keyblock[keyIndex].keyMaterialOffset << 9);
	/* Encryption via dm */
	r = LUKS_encrypt_to_storage(AfKey,
				    AFEKSize,
				    hdr->cipherName, hdr->cipherMode,
				    derived_key,
				    hdr->keyblock[keyIndex].keyMaterialOffset,
				    ctx);
	if (r < 0)
		goto out;

	/* Mark the key as active in phdr */
	r = LUKS_keyslot_set(hdr, (int)keyIndex, 1);
	if (r < 0)
		goto out;

	r = LUKS_write_phdr(hdr, ctx);
	if (r < 0)
		goto out;

	r = 0;
out:
	crypt_safe_free(AfKey);
	crypt_free_volume_key(derived_key);
	return r;
}
int LUKS_generate_phdr(struct luks_phdr *header, const struct volume_key *vk, const char *cipherName, const char *cipherMode, const char *hashSpec, const char *uuid, uint64_t data_offset, /* in bytes */ uint64_t align_offset, /* in bytes */ uint64_t required_alignment, /* in bytes */ struct crypt_device *ctx) { int i, r; size_t keyslot_sectors, header_sectors; uuid_t partitionUuid; struct crypt_pbkdf_type *pbkdf; double PBKDF2_temp; char luksMagic[] = LUKS_MAGIC; if (data_offset % SECTOR_SIZE || align_offset % SECTOR_SIZE || required_alignment % SECTOR_SIZE) return -EINVAL; memset(header, 0, sizeof(struct luks_phdr)); keyslot_sectors = AF_split_sectors(vk->keylength, LUKS_STRIPES); header_sectors = LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE; for (i = 0; i < LUKS_NUMKEYS; i++) { header->keyblock[i].active = LUKS_KEY_DISABLED; header->keyblock[i].keyMaterialOffset = header_sectors; header->keyblock[i].stripes = LUKS_STRIPES; header_sectors = size_round_up(header_sectors + keyslot_sectors, LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE); } /* In sector is now size of all keyslot material space */ /* Data offset has priority */ if (data_offset) header->payloadOffset = data_offset / SECTOR_SIZE; else if (required_alignment) { header->payloadOffset = size_round_up(header_sectors, (required_alignment / SECTOR_SIZE)); header->payloadOffset += (align_offset / SECTOR_SIZE); } else header->payloadOffset = 0; if (header->payloadOffset && header->payloadOffset < header_sectors) { log_err(ctx, _("Data offset for LUKS header must be " "either 0 or higher than header size.")); return -EINVAL; } if (crypt_hmac_size(hashSpec) < LUKS_DIGESTSIZE) { log_err(ctx, _("Requested LUKS hash %s is not supported."), hashSpec); return -EINVAL; } if (uuid && uuid_parse(uuid, partitionUuid) == -1) { log_err(ctx, _("Wrong LUKS UUID format provided.")); return -EINVAL; } if (!uuid) uuid_generate(partitionUuid); /* Set Magic */ memcpy(header->magic,luksMagic,LUKS_MAGIC_L); header->version=1; 
strncpy(header->cipherName,cipherName,LUKS_CIPHERNAME_L-1); strncpy(header->cipherMode,cipherMode,LUKS_CIPHERMODE_L-1); strncpy(header->hashSpec,hashSpec,LUKS_HASHSPEC_L-1); header->keyBytes=vk->keylength; LUKS_fix_header_compatible(header); log_dbg(ctx, "Generating LUKS header version %d using hash %s, %s, %s, MK %d bytes", header->version, header->hashSpec ,header->cipherName, header->cipherMode, header->keyBytes); r = crypt_random_get(ctx, header->mkDigestSalt, LUKS_SALTSIZE, CRYPT_RND_SALT); if(r < 0) { log_err(ctx, _("Cannot create LUKS header: reading random salt failed.")); return r; } /* Compute master key digest */ pbkdf = crypt_get_pbkdf(ctx); r = crypt_benchmark_pbkdf_internal(ctx, pbkdf, vk->keylength); if (r < 0) return r; assert(pbkdf->iterations); PBKDF2_temp = (double)pbkdf->iterations * LUKS_MKD_ITERATIONS_MS / pbkdf->time_ms; if (PBKDF2_temp > (double)UINT32_MAX) return -EINVAL; header->mkDigestIterations = at_least((uint32_t)PBKDF2_temp, LUKS_MKD_ITERATIONS_MIN); r = crypt_pbkdf(CRYPT_KDF_PBKDF2, header->hashSpec, vk->key,vk->keylength, header->mkDigestSalt, LUKS_SALTSIZE, header->mkDigest,LUKS_DIGESTSIZE, header->mkDigestIterations, 0, 0); if (r < 0) { log_err(ctx, _("Cannot create LUKS header: header digest failed (using hash %s)."), header->hashSpec); return r; } uuid_unparse(partitionUuid, header->uuid); log_dbg(ctx, "Data offset %d, UUID %s, digest iterations %" PRIu32, header->payloadOffset, header->uuid, header->mkDigestIterations); return 0; }
/*
 * Read and convert on-disk LUKS2 header to in-memory representation.
 * Try to do recovery if on-disk state is not consistent.
 */
int LUKS2_disk_hdr_read(struct crypt_device *cd, struct luks2_hdr *hdr,
			struct device *device, int do_recovery)
{
	/* Per-copy state: valid, older seqid/bad JSON, unreadable, or I/O error. */
	enum { HDR_OK, HDR_OBSOLETE, HDR_FAIL, HDR_FAIL_IO } state_hdr1, state_hdr2;
	struct luks2_hdr_disk hdr_disk1, hdr_disk2;
	char *json_area1 = NULL, *json_area2 = NULL;
	json_object *jobj_hdr1 = NULL, *jobj_hdr2 = NULL;
	int i, r;
	uint64_t hdr_size;

	/* Recovery rewrites the device; without metadata locking a concurrent
	 * writer could race us, so only repair when locking is active. */
	if (do_recovery && !crypt_metadata_locking_enabled()) {
		do_recovery = 0;
		log_dbg("Disabling header auto-recovery due to locking being disabled.");
	}

	/*
	 * Read primary LUKS2 header (offset 0).
	 */
	state_hdr1 = HDR_FAIL;
	r = hdr_read_disk(device, &hdr_disk1, &json_area1, 0, 0);
	if (r == 0) {
		jobj_hdr1 = parse_and_validate_json(json_area1, be64_to_cpu(hdr_disk1.hdr_size) - LUKS2_HDR_BIN_LEN);
		state_hdr1 = jobj_hdr1 ? HDR_OK : HDR_OBSOLETE;
	} else if (r == -EIO)
		state_hdr1 = HDR_FAIL_IO;

	/*
	 * Read secondary LUKS2 header (follows primary).
	 */
	state_hdr2 = HDR_FAIL;
	if (state_hdr1 != HDR_FAIL && state_hdr1 != HDR_FAIL_IO) {
		/* Primary binary part is readable: it tells us where the
		 * secondary copy starts. */
		r = hdr_read_disk(device, &hdr_disk2, &json_area2, be64_to_cpu(hdr_disk1.hdr_size), 1);
		if (r == 0) {
			jobj_hdr2 = parse_and_validate_json(json_area2, be64_to_cpu(hdr_disk2.hdr_size) - LUKS2_HDR_BIN_LEN);
			state_hdr2 = jobj_hdr2 ? HDR_OK : HDR_OBSOLETE;
		} else if (r == -EIO)
			state_hdr2 = HDR_FAIL_IO;
	} else {
		/*
		 * No header size, check all known offsets.
		 */
		/* Probe power-of-two offsets (2..1024 * 4096) until one reads. */
		for (r = -EINVAL,i = 2; r < 0 && i <= 1024; i <<= 1)
			r = hdr_read_disk(device, &hdr_disk2, &json_area2, i * 4096, 1);

		if (r == 0) {
			jobj_hdr2 = parse_and_validate_json(json_area2, be64_to_cpu(hdr_disk2.hdr_size) - LUKS2_HDR_BIN_LEN);
			state_hdr2 = jobj_hdr2 ? HDR_OK : HDR_OBSOLETE;
		} else if (r == -EIO)
			state_hdr2 = HDR_FAIL_IO;
	}

	/*
	 * Check sequence id if both headers are read correctly.
	 */
	/* The copy with the lower seqid is stale and becomes the recovery target. */
	if (state_hdr1 == HDR_OK && state_hdr2 == HDR_OK) {
		if (be64_to_cpu(hdr_disk1.seqid) > be64_to_cpu(hdr_disk2.seqid))
			state_hdr2 = HDR_OBSOLETE;
		else if (be64_to_cpu(hdr_disk1.seqid) < be64_to_cpu(hdr_disk2.seqid))
			state_hdr1 = HDR_OBSOLETE;
	}

	/* check header with keyslots to fit the device */
	if (state_hdr1 == HDR_OK)
		hdr_size = LUKS2_hdr_and_areas_size(jobj_hdr1);
	else if (state_hdr2 == HDR_OK)
		hdr_size = LUKS2_hdr_and_areas_size(jobj_hdr2);
	else {
		/* Neither copy usable: report -EIO only if both failed on I/O. */
		r = (state_hdr1 == HDR_FAIL_IO && state_hdr2 == HDR_FAIL_IO) ? -EIO : -EINVAL;
		goto err;
	}

	r = LUKS2_check_device_size(cd, device, hdr_size, 0);
	if (r)
		goto err;

	/*
	 * Try to rewrite (recover) bad header. Always regenerate salt for bad header.
	 */
	if (state_hdr1 == HDR_OK && state_hdr2 != HDR_OK) {
		log_dbg("Secondary LUKS2 header requires recovery.");

		if (do_recovery) {
			/* Seed the bad copy's binary part from the good one,
			 * then give it a fresh salt before writing back. */
			memcpy(&hdr_disk2, &hdr_disk1, LUKS2_HDR_BIN_LEN);
			r = crypt_random_get(NULL, (char*)hdr_disk2.salt, sizeof(hdr_disk2.salt), CRYPT_RND_SALT);
			if (r)
				log_dbg("Cannot generate master salt.");
			else {
				hdr_from_disk(&hdr_disk1, &hdr_disk2, hdr, 0);
				r = hdr_write_disk(device, hdr, json_area1, 1);
			}
			if (r)
				log_dbg("Secondary LUKS2 header recovery failed.");
		}
	} else if (state_hdr1 != HDR_OK && state_hdr2 == HDR_OK) {
		log_dbg("Primary LUKS2 header requires recovery.");

		if (do_recovery) {
			memcpy(&hdr_disk1, &hdr_disk2, LUKS2_HDR_BIN_LEN);
			r = crypt_random_get(NULL, (char*)hdr_disk1.salt, sizeof(hdr_disk1.salt), CRYPT_RND_SALT);
			if (r)
				log_dbg("Cannot generate master salt.");
			else {
				hdr_from_disk(&hdr_disk2, &hdr_disk1, hdr, 1);
				r = hdr_write_disk(device, hdr, json_area2, 0);
			}
			if (r)
				log_dbg("Primary LUKS2 header recovery failed.");
		}
	}

	/* JSON text areas are no longer needed; ownership of the parsed
	 * json objects moves to hdr below. */
	free(json_area1);
	json_area1 = NULL;
	free(json_area2);
	json_area2 = NULL;

	/* wrong lock for write mode during recovery attempt */
	if (r == -EAGAIN)
		goto err;

	/*
	 * Even if status is failed, the second header includes salt.
	 */
	if (state_hdr1 == HDR_OK) {
		hdr_from_disk(&hdr_disk1, &hdr_disk2, hdr, 0);
		hdr->jobj = jobj_hdr1;
		json_object_put(jobj_hdr2);
	} else if (state_hdr2 == HDR_OK) {
		hdr_from_disk(&hdr_disk2, &hdr_disk1, hdr, 1);
		hdr->jobj = jobj_hdr2;
		json_object_put(jobj_hdr1);
	}

	/*
	 * FIXME: should this fail? At least one header was read correctly.
	 * r = (state_hdr1 == HDR_FAIL_IO || state_hdr2 == HDR_FAIL_IO) ? -EIO : -EINVAL;
	 */
	return 0;
err:
	log_dbg("LUKS2 header read failed (%d).", r);

	free(json_area1);
	free(json_area2);
	json_object_put(jobj_hdr1);
	json_object_put(jobj_hdr2);
	hdr->jobj = NULL;
	return r;
}