/* Receive the header of an option reply, which should match the given * opt. Read through the length field, but NOT the length bytes of * payload. Return 0 if successful, -1 with errp set if it is * impossible to continue. */ static int nbd_receive_option_reply(QIOChannel *ioc, uint32_t opt, nbd_opt_reply *reply, Error **errp) { QEMU_BUILD_BUG_ON(sizeof(*reply) != 20); if (nbd_read(ioc, reply, sizeof(*reply), errp) < 0) { error_prepend(errp, "failed to read option reply"); nbd_send_opt_abort(ioc); return -1; } be64_to_cpus(&reply->magic); be32_to_cpus(&reply->option); be32_to_cpus(&reply->type); be32_to_cpus(&reply->length); trace_nbd_receive_option_reply(reply->option, nbd_opt_lookup(reply->option), reply->type, nbd_rep_lookup(reply->type), reply->length); if (reply->magic != NBD_REP_MAGIC) { error_setg(errp, "Unexpected option reply magic"); nbd_send_opt_abort(ioc); return -1; } if (reply->option != opt) { error_setg(errp, "Unexpected option type %x expected %x", reply->option, opt); nbd_send_opt_abort(ioc); return -1; } return 0; }
/* Convert the multi-byte fields of an on-disk bitmap directory entry
 * from big-endian to host byte order, in place. */
static inline void bitmap_dir_entry_to_cpu(Qcow2BitmapDirEntry *entry)
{
    /* The conversions are independent of one another; any order works. */
    be16_to_cpus(&entry->name_size);
    be32_to_cpus(&entry->bitmap_table_size);
    be32_to_cpus(&entry->flags);
    be32_to_cpus(&entry->extra_data_size);
    be64_to_cpus(&entry->bitmap_table_offset);
}
/* Byte-swap a RET_SUBMIT header in place between host order and the
 * big-endian wire format.  @send non-zero means host -> wire; zero
 * means wire -> host. */
static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu,
				      int send)
{
	if (!send) {
		be32_to_cpus(&pdu->status);
		be32_to_cpus(&pdu->actual_length);
		be32_to_cpus(&pdu->start_frame);
		be32_to_cpus(&pdu->error_count);
		return;
	}

	cpu_to_be32s(&pdu->status);
	cpu_to_be32s(&pdu->actual_length);
	cpu_to_be32s(&pdu->start_frame);
	cpu_to_be32s(&pdu->error_count);
}
/* Byte-swap the single 32-bit status field of a RET_UNLINK header.
 * @send non-zero converts host -> big-endian wire order, zero converts
 * the opposite direction. */
static void correct_endian_ret_unlink(struct usbip_header_ret_unlink *pdu,
				      int send)
{
	if (send) {
		cpu_to_be32s(&pdu->status);
	} else {
		be32_to_cpus(&pdu->status);
	}
}
/* Reads a snapshot record from a qcow2-formatted file.
 *
 * The function assumes the file position of 'fd' points to the beginning of a
 * QcowSnapshotHeader record.  When the call returns, the file position of fd
 * is at the place where the next QcowSnapshotHeader should start, if there is
 * one.
 *
 * C.f. QCowSnapshotHeader in block/qcow2-snapshot.c for the complete layout
 * of the header.
 */
static void snapshot_info_read(int fd, SnapshotInfo *info)
{
    uint64_t start_offset = seek_or_die(fd, 0, SEEK_CUR);
    uint32_t extra_data_size;
    uint16_t id_str_size, name_size;

    /* read fixed-length fields */
    seek_or_die(fd, 12, SEEK_CUR);  /* skip l1 info */
    read_or_die(fd, &id_str_size, sizeof(id_str_size));
    read_or_die(fd, &name_size, sizeof(name_size));
    read_or_die(fd, &info->date_sec, sizeof(info->date_sec));
    read_or_die(fd, &info->date_nsec, sizeof(info->date_nsec));
    read_or_die(fd, &info->vm_clock_nsec, sizeof(info->vm_clock_nsec));
    read_or_die(fd, &info->vm_state_size, sizeof(info->vm_state_size));
    read_or_die(fd, &extra_data_size, sizeof(extra_data_size));

    /* convert to host endianness */
    be16_to_cpus(&id_str_size);
    be16_to_cpus(&name_size);
    be32_to_cpus(&info->date_sec);
    be32_to_cpus(&info->date_nsec);
    be64_to_cpus(&info->vm_clock_nsec);
    be32_to_cpus(&info->vm_state_size);
    /* BUG FIX: extra_data_size was previously swapped TWICE; on a
     * little-endian host the second swap restored the raw big-endian
     * value, so the seek below skipped a garbage number of bytes and
     * id_str/name were read from the wrong offset.  Swap exactly once. */
    be32_to_cpus(&extra_data_size);

    /* read variable-length buffers */
    info->id_str = android_alloc(id_str_size + 1); /* +1: manual null-termination */
    info->name = android_alloc(name_size + 1);
    seek_or_die(fd, extra_data_size, SEEK_CUR);  /* skip extra data */
    read_or_die(fd, info->id_str, id_str_size);
    read_or_die(fd, info->name, name_size);
    info->id_str[id_str_size] = '\0';
    info->name[name_size] = '\0';

    /* headers are 8 byte aligned, ceil to nearest multiple of 8 */
    uint64_t end_offset = seek_or_die(fd, 0, SEEK_CUR);
    uint32_t total_size = end_offset - start_offset;
    uint32_t aligned_size = ((total_size - 1) / 8 + 1) * 8;

    /* skip to start of next record */
    seek_or_die(fd, start_offset + aligned_size, SEEK_SET);
}
/* Byte-swap a CMD_SUBMIT header in place between host order and the
 * big-endian wire format.  @send non-zero means host -> wire; zero
 * means wire -> host. */
static void correct_endian_cmd_submit(struct usbip_header_cmd_submit *pdu,
				      int send)
{
	if (!send) {
		/* transfer_flags is converted by value (assignment form),
		 * preserving the original code's treatment of that field. */
		pdu->transfer_flags = be32_to_cpu(pdu->transfer_flags);
		be32_to_cpus(&pdu->transfer_buffer_length);
		be32_to_cpus(&pdu->start_frame);
		be32_to_cpus(&pdu->number_of_packets);
		be32_to_cpus(&pdu->interval);
		return;
	}

	pdu->transfer_flags = cpu_to_be32(pdu->transfer_flags);
	cpu_to_be32s(&pdu->transfer_buffer_length);
	cpu_to_be32s(&pdu->start_frame);
	cpu_to_be32s(&pdu->number_of_packets);
	cpu_to_be32s(&pdu->interval);
}
/* * Helper function to analyze a PIMFOR management frame header. */ static pimfor_header_t * pimfor_decode_header(void *data, int len) { pimfor_header_t *h = data; while ((void *) h < data + len) { if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) { le32_to_cpus(&h->oid); le32_to_cpus(&h->length); } else { be32_to_cpus(&h->oid); be32_to_cpus(&h->length); } if (h->oid != OID_INL_TUNNEL) return h; h++; } return NULL; }
/* Fill in the ALSA card identification strings for an OXFW device.
 *
 * Reads the vendor/model strings from the device's config ROM and the
 * firmware ID quadlet from the OXFW register space.  For Griffin/LaCie
 * devices the strings come from the static compat_info table attached to
 * the device-id entry instead.
 *
 * Returns 0 on success or a negative error code from the FireWire
 * helpers on failure.
 */
static int name_card(struct snd_oxfw *oxfw)
{
	struct fw_device *fw_dev = fw_parent_device(oxfw->unit);
	const struct compat_info *info;
	char vendor[24];
	char model[32];
	const char *d, *v, *m;
	u32 firmware;
	int err;

	/* get vendor name from root directory */
	err = fw_csr_string(fw_dev->config_rom + 5, CSR_VENDOR,
			    vendor, sizeof(vendor));
	if (err < 0)
		goto end;

	/* get model name from unit directory */
	err = fw_csr_string(oxfw->unit->directory, CSR_MODEL,
			    model, sizeof(model));
	if (err < 0)
		goto end;

	/* Firmware ID quadlet arrives big-endian; convert to host order. */
	err = snd_fw_transaction(oxfw->unit, TCODE_READ_QUADLET_REQUEST,
				 OXFORD_FIRMWARE_ID_ADDRESS, &firmware, 4, 0);
	if (err < 0)
		goto end;
	be32_to_cpus(&firmware);

	/* to apply card definitions */
	if (oxfw->entry->vendor_id == VENDOR_GRIFFIN ||
	    oxfw->entry->vendor_id == VENDOR_LACIE) {
		info = (const struct compat_info *)oxfw->entry->driver_data;
		d = info->driver_name;
		v = info->vendor_name;
		m = info->model_name;
	} else {
		d = "OXFW";
		v = vendor;
		m = model;
	}

	/* NOTE(review): strcpy assumes the chosen strings fit in the
	 * fixed-size card fields — presumably guaranteed by the bounded
	 * fw_csr_string() buffers and the static compat table; confirm. */
	strcpy(oxfw->card->driver, d);
	strcpy(oxfw->card->mixername, m);
	strcpy(oxfw->card->shortname, m);

	snprintf(oxfw->card->longname, sizeof(oxfw->card->longname),
		 "%s %s (OXFW%x %04x), GUID %08x%08x at %s, S%d",
		 v, m, firmware >> 20, firmware & 0xffff,
		 fw_dev->config_rom[3], fw_dev->config_rom[4],
		 dev_name(&oxfw->unit->device), 100 << fw_dev->max_speed);
end:
	return err;
}
/* nbd_receive_simple_reply * Read simple reply except magic field (which should be already read). * Payload is not read (payload is possible for CMD_READ, but here we even * don't know whether it take place or not). */ static int nbd_receive_simple_reply(QIOChannel *ioc, NBDSimpleReply *reply, Error **errp) { int ret; assert(reply->magic == NBD_SIMPLE_REPLY_MAGIC); ret = nbd_read(ioc, (uint8_t *)reply + sizeof(reply->magic), sizeof(*reply) - sizeof(reply->magic), errp); if (ret < 0) { return ret; } be32_to_cpus(&reply->error); be64_to_cpus(&reply->handle); return 0; }
/* nbd_receive_reply * Returns 1 on success * 0 on eof, when no data was read (errp is not set) * negative errno on failure (errp is set) */ int nbd_receive_reply(QIOChannel *ioc, NBDReply *reply, Error **errp) { int ret; const char *type; ret = nbd_read_eof(ioc, &reply->magic, sizeof(reply->magic), errp); if (ret <= 0) { return ret; } be32_to_cpus(&reply->magic); switch (reply->magic) { case NBD_SIMPLE_REPLY_MAGIC: ret = nbd_receive_simple_reply(ioc, &reply->simple, errp); if (ret < 0) { break; } trace_nbd_receive_simple_reply(reply->simple.error, nbd_err_lookup(reply->simple.error), reply->handle); break; case NBD_STRUCTURED_REPLY_MAGIC: ret = nbd_receive_structured_reply_chunk(ioc, &reply->structured, errp); if (ret < 0) { break; } type = nbd_reply_type_lookup(reply->structured.type); trace_nbd_receive_structured_reply_chunk(reply->structured.flags, reply->structured.type, type, reply->structured.handle, reply->structured.length); break; default: error_setg(errp, "invalid magic (got 0x%" PRIx32 ")", reply->magic); return -EINVAL; } if (ret < 0) { return ret; } return 1; }
/* nbd_receive_structured_reply_chunk * Read structured reply chunk except magic field (which should be already * read). * Payload is not read. */ static int nbd_receive_structured_reply_chunk(QIOChannel *ioc, NBDStructuredReplyChunk *chunk, Error **errp) { int ret; assert(chunk->magic == NBD_STRUCTURED_REPLY_MAGIC); ret = nbd_read(ioc, (uint8_t *)chunk + sizeof(chunk->magic), sizeof(*chunk) - sizeof(chunk->magic), errp); if (ret < 0) { return ret; } be16_to_cpus(&chunk->flags); be16_to_cpus(&chunk->type); be64_to_cpus(&chunk->handle); be32_to_cpus(&chunk->length); return 0; }
/* Probe 'filename' for a QCOW header and report its virtual size.
 *
 * On success *size is set to the image size in 512-byte sectors and 0 is
 * returned; -1 is returned if the file cannot be opened or fully read.
 *
 * NOTE(review): this function appears TRUNCATED in this chunk — the
 * closing brace below ends only the QCOW_MAGIC branch; the non-QCOW
 * fallback (presumably using *st, which is otherwise unused here) and
 * the function's closing brace are not visible.
 *
 * NOTE(review): `read(...) < sizeof(header)` compares a signed ssize_t
 * against an unsigned size_t; a -1 read error converts to SIZE_MAX and
 * would NOT trigger the error branch — TODO confirm and fix upstream.
 */
static int get_filesize(char *filename, uint64_t *size, struct stat *st)
{
    int fd;
    QCowHeader header;

    /* Set to the backing file size */
    fd = open(filename, O_RDONLY);
    if (fd < 0)
        return -1;
    if (read(fd, &header, sizeof(header)) < sizeof(header)) {
        close(fd);
        return -1;
    }
    close(fd);

    /* Header fields are stored big-endian on disk. */
    be32_to_cpus(&header.magic);
    be64_to_cpus(&header.size);
    if (header.magic == QCOW_MAGIC) {
        *size = header.size >> SECTOR_SHIFT;
        return 0;
    }
/* Open a qcow (v1) image and populate the driver state.
 *
 * Validates the big-endian header, then loads the L1 table and
 * allocates the L2/cluster caches.  Returns 0 on success, -1 on any
 * failure (all partially-allocated buffers are released).
 *
 * This revision adds the upstream security validations:
 *  - CVE-2014-0222: bound l2_bits so L2 table/cache sizes cannot
 *    overflow (tables hold uint64_t entries, 512 bytes .. 64 kB).
 *  - CVE-2014-0223: bound the image size so the L1 table size
 *    computation and allocation cannot overflow.
 */
static int CVE_2014_0223_qemu1_0_qcow_open(BlockDriverState *bs, int flags)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, shift;
    uint64_t l1_size;
    QCowHeader header;

    if (bdrv_pread(bs->file, 0, &header, sizeof(header)) != sizeof(header))
        goto fail;
    /* On-disk header is big-endian. */
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be32_to_cpus(&header.mtime);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);

    if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION)
        goto fail;
    if (header.size <= 1 || header.cluster_bits < 9)
        goto fail;
    /* CVE-2014-0222 fix: l2_bits is attacker-controlled; L2 tables hold
     * uint64_t entries and must be between 512 bytes and 64 kB. */
    if (header.l2_bits < 9 - 3 || header.l2_bits > 16 - 3)
        goto fail;
    if (header.crypt_method > QCOW_CRYPT_AES)
        goto fail;
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header)
        bs->encrypted = 1;
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = header.l2_bits;
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->cluster_offset_mask = (1LL << (63 - s->cluster_bits)) - 1;

    /* read the level 1 table */
    shift = s->cluster_bits + s->l2_bits;
    /* CVE-2014-0223 fix: reject images so large that the rounded-up
     * L1 entry count, or the byte size of the L1 table, overflows. */
    if (header.size > (uint64_t)INT64_MAX - (1LL << shift))
        goto fail;
    l1_size = (header.size + (1LL << shift) - 1) >> shift;
    if (l1_size > INT_MAX / sizeof(uint64_t))
        goto fail;
    s->l1_size = l1_size;

    s->l1_table_offset = header.l1_table_offset;
    s->l1_table = g_malloc(s->l1_size * sizeof(uint64_t));
    if (!s->l1_table)
        goto fail;
    if (bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                   s->l1_size * sizeof(uint64_t)) !=
        s->l1_size * sizeof(uint64_t))
        goto fail;
    for (i = 0; i < s->l1_size; i++) {
        be64_to_cpus(&s->l1_table[i]);
    }

    /* alloc L2 cache */
    s->l2_cache = g_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    if (!s->l2_cache)
        goto fail;
    s->cluster_cache = g_malloc(s->cluster_size);
    if (!s->cluster_cache)
        goto fail;
    s->cluster_data = g_malloc(s->cluster_size);
    if (!s->cluster_data)
        goto fail;
    s->cluster_cache_offset = -1;

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023)
            len = 1023;
        if (bdrv_pread(bs->file, header.backing_file_offset,
                       bs->backing_file, len) != len)
            goto fail;
        bs->backing_file[len] = '\0';
    }

    /* Disable migration when qcow images are used */
    error_set(&s->migration_blocker,
              QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
              "qcow", bs->device_name, "live migration");
    migrate_add_blocker(s->migration_blocker);

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->l1_table);
    g_free(s->l2_cache);
    g_free(s->cluster_cache);
    g_free(s->cluster_data);
    return -1;
}
/* Create and format a new LUKS volume.
 *
 * Builds the LUKS header in native endianness, derives the master key
 * and key-slot material via PBKDF2, encrypts the AF-split master key,
 * and writes header + key material to the underlying storage through
 * @writefunc (after @initfunc has reserved the header space).
 *
 * Returns 0 on success, -1 with @errp set on failure.  All key material
 * is zeroized before the buffers are freed.
 */
static int
qcrypto_block_luks_create(QCryptoBlock *block,
                          QCryptoBlockCreateOptions *options,
                          QCryptoBlockInitFunc initfunc,
                          QCryptoBlockWriteFunc writefunc,
                          void *opaque,
                          Error **errp)
{
    QCryptoBlockLUKS *luks;
    QCryptoBlockCreateOptionsLUKS luks_opts;
    Error *local_err = NULL;
    uint8_t *masterkey = NULL;
    uint8_t *slotkey = NULL;
    uint8_t *splitkey = NULL;
    size_t splitkeylen = 0;
    size_t i;
    QCryptoCipher *cipher = NULL;
    QCryptoIVGen *ivgen = NULL;
    char *password;
    const char *cipher_alg;
    const char *cipher_mode;
    const char *ivgen_alg;
    const char *ivgen_hash_alg = NULL;
    const char *hash_alg;
    char *cipher_mode_spec = NULL;
    QCryptoCipherAlgorithm ivcipheralg = 0;
    uint64_t iters;

    /* Apply defaults for every option the caller left unset. */
    memcpy(&luks_opts, &options->u.luks, sizeof(luks_opts));
    if (!luks_opts.has_iter_time) {
        luks_opts.iter_time = 2000;
    }
    if (!luks_opts.has_cipher_alg) {
        luks_opts.cipher_alg = QCRYPTO_CIPHER_ALG_AES_256;
    }
    if (!luks_opts.has_cipher_mode) {
        luks_opts.cipher_mode = QCRYPTO_CIPHER_MODE_XTS;
    }
    if (!luks_opts.has_ivgen_alg) {
        luks_opts.ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64;
    }
    if (!luks_opts.has_hash_alg) {
        luks_opts.hash_alg = QCRYPTO_HASH_ALG_SHA256;
    }
    if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) {
        if (!luks_opts.has_ivgen_hash_alg) {
            luks_opts.ivgen_hash_alg = QCRYPTO_HASH_ALG_SHA256;
            luks_opts.has_ivgen_hash_alg = true;
        }
    }
    /* Note we're allowing ivgen_hash_alg to be set even for
     * non-essiv iv generators that don't need a hash.  It will
     * be silently ignored, for compatibility with dm-crypt */

    if (!options->u.luks.key_secret) {
        error_setg(errp, "Parameter 'key-secret' is required for cipher");
        return -1;
    }
    password = qcrypto_secret_lookup_as_utf8(luks_opts.key_secret, errp);
    if (!password) {
        return -1;
    }

    luks = g_new0(QCryptoBlockLUKS, 1);
    block->opaque = luks;

    memcpy(luks->header.magic, qcrypto_block_luks_magic,
           QCRYPTO_BLOCK_LUKS_MAGIC_LEN);

    /* We populate the header in native endianness initially and
     * then convert everything to big endian just before writing
     * it out to disk */
    luks->header.version = QCRYPTO_BLOCK_LUKS_VERSION;
    qcrypto_block_luks_uuid_gen(luks->header.uuid);

    cipher_alg = qcrypto_block_luks_cipher_alg_lookup(luks_opts.cipher_alg,
                                                      errp);
    if (!cipher_alg) {
        goto error;
    }

    cipher_mode = QCryptoCipherMode_lookup[luks_opts.cipher_mode];
    ivgen_alg = QCryptoIVGenAlgorithm_lookup[luks_opts.ivgen_alg];
    /* Build the "<mode>-<ivgen>[:<ivhash>]" spec string stored on disk. */
    if (luks_opts.has_ivgen_hash_alg) {
        ivgen_hash_alg = QCryptoHashAlgorithm_lookup[luks_opts.ivgen_hash_alg];
        cipher_mode_spec = g_strdup_printf("%s-%s:%s", cipher_mode, ivgen_alg,
                                           ivgen_hash_alg);
    } else {
        cipher_mode_spec = g_strdup_printf("%s-%s", cipher_mode, ivgen_alg);
    }
    hash_alg = QCryptoHashAlgorithm_lookup[luks_opts.hash_alg];

    /* The header stores these as fixed-width NUL-terminated strings;
     * reject anything that would not fit. */
    if (strlen(cipher_alg) >= QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN) {
        error_setg(errp, "Cipher name '%s' is too long for LUKS header",
                   cipher_alg);
        goto error;
    }
    if (strlen(cipher_mode_spec) >= QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN) {
        error_setg(errp, "Cipher mode '%s' is too long for LUKS header",
                   cipher_mode_spec);
        goto error;
    }
    if (strlen(hash_alg) >= QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN) {
        error_setg(errp, "Hash name '%s' is too long for LUKS header",
                   hash_alg);
        goto error;
    }

    if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) {
        ivcipheralg = qcrypto_block_luks_essiv_cipher(luks_opts.cipher_alg,
                                                      luks_opts.ivgen_hash_alg,
                                                      &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            goto error;
        }
    } else {
        ivcipheralg = luks_opts.cipher_alg;
    }

    strcpy(luks->header.cipher_name, cipher_alg);
    strcpy(luks->header.cipher_mode, cipher_mode_spec);
    strcpy(luks->header.hash_spec, hash_alg);

    luks->header.key_bytes = qcrypto_cipher_get_key_len(luks_opts.cipher_alg);
    /* XTS uses two keys of the nominal length. */
    if (luks_opts.cipher_mode == QCRYPTO_CIPHER_MODE_XTS) {
        luks->header.key_bytes *= 2;
    }

    /* Generate the salt used for hashing the master key
     * with PBKDF later */
    if (qcrypto_random_bytes(luks->header.master_key_salt,
                             QCRYPTO_BLOCK_LUKS_SALT_LEN,
                             errp) < 0) {
        goto error;
    }

    /* Generate random master key */
    masterkey = g_new0(uint8_t, luks->header.key_bytes);
    if (qcrypto_random_bytes(masterkey, luks->header.key_bytes, errp) < 0) {
        goto error;
    }

    /* Setup the block device payload encryption objects */
    block->cipher = qcrypto_cipher_new(luks_opts.cipher_alg,
                                       luks_opts.cipher_mode,
                                       masterkey, luks->header.key_bytes,
                                       errp);
    if (!block->cipher) {
        goto error;
    }

    block->kdfhash = luks_opts.hash_alg;
    block->niv = qcrypto_cipher_get_iv_len(luks_opts.cipher_alg,
                                           luks_opts.cipher_mode);
    block->ivgen = qcrypto_ivgen_new(luks_opts.ivgen_alg,
                                     ivcipheralg,
                                     luks_opts.ivgen_hash_alg,
                                     masterkey, luks->header.key_bytes,
                                     errp);
    if (!block->ivgen) {
        goto error;
    }

    /* Determine how many iterations we need to hash the master
     * key, in order to have 1 second of compute time used */
    iters = qcrypto_pbkdf2_count_iters(luks_opts.hash_alg,
                                       masterkey, luks->header.key_bytes,
                                       luks->header.master_key_salt,
                                       QCRYPTO_BLOCK_LUKS_SALT_LEN,
                                       QCRYPTO_BLOCK_LUKS_DIGEST_LEN,
                                       &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }

    if (iters > (ULLONG_MAX / luks_opts.iter_time)) {
        error_setg_errno(errp, ERANGE,
                         "PBKDF iterations %llu too large to scale",
                         (unsigned long long)iters);
        goto error;
    }

    /* iter_time was in millis, but count_iters reported for secs */
    iters = iters * luks_opts.iter_time / 1000;

    /* Why /= 8 ?  That matches cryptsetup, but there's no
     * explanation why they chose /= 8...  Probably so that
     * if all 8 keyslots are active we only spend 1 second
     * in total time to check all keys */
    iters /= 8;
    if (iters > UINT32_MAX) {
        error_setg_errno(errp, ERANGE,
                         "PBKDF iterations %llu larger than %u",
                         (unsigned long long)iters, UINT32_MAX);
        goto error;
    }
    iters = MAX(iters, QCRYPTO_BLOCK_LUKS_MIN_MASTER_KEY_ITERS);
    luks->header.master_key_iterations = iters;

    /* Hash the master key, saving the result in the LUKS
     * header.  This hash is used when opening the encrypted
     * device to verify that the user password unlocked a
     * valid master key */
    if (qcrypto_pbkdf2(luks_opts.hash_alg,
                       masterkey, luks->header.key_bytes,
                       luks->header.master_key_salt,
                       QCRYPTO_BLOCK_LUKS_SALT_LEN,
                       luks->header.master_key_iterations,
                       luks->header.master_key_digest,
                       QCRYPTO_BLOCK_LUKS_DIGEST_LEN,
                       errp) < 0) {
        goto error;
    }

    /* Although LUKS has multiple key slots, we're just going
     * to use the first key slot */
    splitkeylen = luks->header.key_bytes * QCRYPTO_BLOCK_LUKS_STRIPES;
    for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) {
        luks->header.key_slots[i].active = i == 0 ?
            QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED :
            QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED;
        luks->header.key_slots[i].stripes = QCRYPTO_BLOCK_LUKS_STRIPES;
        /* This calculation doesn't match that shown in the spec,
         * but instead follows the cryptsetup implementation. */
        luks->header.key_slots[i].key_offset =
            (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET /
             QCRYPTO_BLOCK_LUKS_SECTOR_SIZE) +
            (ROUND_UP(DIV_ROUND_UP(splitkeylen,
                                   QCRYPTO_BLOCK_LUKS_SECTOR_SIZE),
                      (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET /
                       QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) * i);
    }

    if (qcrypto_random_bytes(luks->header.key_slots[0].salt,
                             QCRYPTO_BLOCK_LUKS_SALT_LEN,
                             errp) < 0) {
        goto error;
    }

    /* Again we determine how many iterations are required to
     * hash the user password while consuming 1 second of compute
     * time */
    iters = qcrypto_pbkdf2_count_iters(luks_opts.hash_alg,
                                       (uint8_t *)password, strlen(password),
                                       luks->header.key_slots[0].salt,
                                       QCRYPTO_BLOCK_LUKS_SALT_LEN,
                                       luks->header.key_bytes,
                                       &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }

    if (iters > (ULLONG_MAX / luks_opts.iter_time)) {
        error_setg_errno(errp, ERANGE,
                         "PBKDF iterations %llu too large to scale",
                         (unsigned long long)iters);
        goto error;
    }

    /* iter_time was in millis, but count_iters reported for secs */
    iters = iters * luks_opts.iter_time / 1000;

    if (iters > UINT32_MAX) {
        error_setg_errno(errp, ERANGE,
                         "PBKDF iterations %llu larger than %u",
                         (unsigned long long)iters, UINT32_MAX);
        goto error;
    }

    luks->header.key_slots[0].iterations =
        MAX(iters, QCRYPTO_BLOCK_LUKS_MIN_SLOT_KEY_ITERS);

    /* Generate a key that we'll use to encrypt the master
     * key, from the user's password */
    slotkey = g_new0(uint8_t, luks->header.key_bytes);
    if (qcrypto_pbkdf2(luks_opts.hash_alg,
                       (uint8_t *)password, strlen(password),
                       luks->header.key_slots[0].salt,
                       QCRYPTO_BLOCK_LUKS_SALT_LEN,
                       luks->header.key_slots[0].iterations,
                       slotkey, luks->header.key_bytes,
                       errp) < 0) {
        goto error;
    }

    /* Setup the encryption objects needed to encrypt the
     * master key material */
    cipher = qcrypto_cipher_new(luks_opts.cipher_alg,
                                luks_opts.cipher_mode,
                                slotkey, luks->header.key_bytes,
                                errp);
    if (!cipher) {
        goto error;
    }

    ivgen = qcrypto_ivgen_new(luks_opts.ivgen_alg,
                              ivcipheralg,
                              luks_opts.ivgen_hash_alg,
                              slotkey, luks->header.key_bytes,
                              errp);
    if (!ivgen) {
        goto error;
    }

    /* Before storing the master key, we need to vastly
     * increase its size, as protection against forensic
     * disk data recovery */
    splitkey = g_new0(uint8_t, splitkeylen);
    if (qcrypto_afsplit_encode(luks_opts.hash_alg,
                               luks->header.key_bytes,
                               luks->header.key_slots[0].stripes,
                               masterkey,
                               splitkey,
                               errp) < 0) {
        goto error;
    }

    /* Now we encrypt the split master key with the key generated
     * from the user's password, before storing it */
    if (qcrypto_block_encrypt_helper(cipher, block->niv, ivgen,
                                     QCRYPTO_BLOCK_LUKS_SECTOR_SIZE,
                                     0,
                                     splitkey,
                                     splitkeylen,
                                     errp) < 0) {
        goto error;
    }

    /* The total size of the LUKS headers is the partition header + key
     * slot headers, rounded up to the nearest sector, combined with
     * the size of each master key material region, also rounded up
     * to the nearest sector */
    luks->header.payload_offset =
        (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET /
         QCRYPTO_BLOCK_LUKS_SECTOR_SIZE) +
        (ROUND_UP(DIV_ROUND_UP(splitkeylen,
                               QCRYPTO_BLOCK_LUKS_SECTOR_SIZE),
                  (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET /
                   QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) *
         QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS);

    block->payload_offset = luks->header.payload_offset *
        QCRYPTO_BLOCK_LUKS_SECTOR_SIZE;

    /* Reserve header space to match payload offset */
    initfunc(block, block->payload_offset, opaque, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }

    /* Everything on disk uses Big Endian, so flip header fields
     * before writing them */
    cpu_to_be16s(&luks->header.version);
    cpu_to_be32s(&luks->header.payload_offset);
    cpu_to_be32s(&luks->header.key_bytes);
    cpu_to_be32s(&luks->header.master_key_iterations);

    for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) {
        cpu_to_be32s(&luks->header.key_slots[i].active);
        cpu_to_be32s(&luks->header.key_slots[i].iterations);
        cpu_to_be32s(&luks->header.key_slots[i].key_offset);
        cpu_to_be32s(&luks->header.key_slots[i].stripes);
    }

    /* Write out the partition header and key slot headers */
    writefunc(block, 0,
              (const uint8_t *)&luks->header,
              sizeof(luks->header),
              opaque,
              &local_err);

    /* Delay checking local_err until we've byte-swapped */

    /* Byte swap the header back to native, in case we need
     * to read it again later */
    be16_to_cpus(&luks->header.version);
    be32_to_cpus(&luks->header.payload_offset);
    be32_to_cpus(&luks->header.key_bytes);
    be32_to_cpus(&luks->header.master_key_iterations);

    for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) {
        be32_to_cpus(&luks->header.key_slots[i].active);
        be32_to_cpus(&luks->header.key_slots[i].iterations);
        be32_to_cpus(&luks->header.key_slots[i].key_offset);
        be32_to_cpus(&luks->header.key_slots[i].stripes);
    }

    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }

    /* Write out the master key material, starting at the
     * sector immediately following the partition header. */
    if (writefunc(block,
                  luks->header.key_slots[0].key_offset *
                  QCRYPTO_BLOCK_LUKS_SECTOR_SIZE,
                  splitkey, splitkeylen,
                  opaque,
                  errp) != splitkeylen) {
        goto error;
    }

    luks->cipher_alg = luks_opts.cipher_alg;
    luks->cipher_mode = luks_opts.cipher_mode;
    luks->ivgen_alg = luks_opts.ivgen_alg;
    luks->ivgen_hash_alg = luks_opts.ivgen_hash_alg;
    luks->hash_alg = luks_opts.hash_alg;

    /* Zeroize key material before freeing.  NOTE(review): a plain
     * memset before g_free may be elided by the optimizer; a
     * memset_explicit/explicit_bzero equivalent would be stronger. */
    memset(masterkey, 0, luks->header.key_bytes);
    g_free(masterkey);
    memset(slotkey, 0, luks->header.key_bytes);
    g_free(slotkey);
    g_free(splitkey);
    g_free(password);
    g_free(cipher_mode_spec);

    qcrypto_ivgen_free(ivgen);
    qcrypto_cipher_free(cipher);

    return 0;

 error:
    if (masterkey) {
        memset(masterkey, 0, luks->header.key_bytes);
    }
    g_free(masterkey);
    if (slotkey) {
        memset(slotkey, 0, luks->header.key_bytes);
    }
    g_free(slotkey);
    g_free(splitkey);
    g_free(password);
    g_free(cipher_mode_spec);

    qcrypto_ivgen_free(ivgen);
    qcrypto_cipher_free(cipher);

    g_free(luks);
    return -1;
}
/* Open an existing LUKS volume.
 *
 * Reads and byte-swaps the LUKS header via @readfunc, parses the
 * cipher/ivgen/hash specification strings, and (unless
 * QCRYPTO_BLOCK_OPEN_NO_IO is set) unlocks the master key with the
 * caller-supplied password and builds the payload cipher/ivgen objects.
 *
 * Returns 0 on success, or a negative errno with @errp set.
 */
static int
qcrypto_block_luks_open(QCryptoBlock *block,
                        QCryptoBlockOpenOptions *options,
                        QCryptoBlockReadFunc readfunc,
                        void *opaque,
                        unsigned int flags,
                        Error **errp)
{
    QCryptoBlockLUKS *luks;
    Error *local_err = NULL;
    int ret = 0;
    size_t i;
    ssize_t rv;
    uint8_t *masterkey = NULL;
    size_t masterkeylen;
    char *ivgen_name, *ivhash_name;
    QCryptoCipherMode ciphermode;
    QCryptoCipherAlgorithm cipheralg;
    QCryptoIVGenAlgorithm ivalg;
    QCryptoCipherAlgorithm ivcipheralg;
    QCryptoHashAlgorithm hash;
    QCryptoHashAlgorithm ivhash;
    char *password = NULL;

    /* The password is only needed if we will actually do I/O. */
    if (!(flags & QCRYPTO_BLOCK_OPEN_NO_IO)) {
        if (!options->u.luks.key_secret) {
            error_setg(errp, "Parameter 'key-secret' is required for cipher");
            return -1;
        }
        password = qcrypto_secret_lookup_as_utf8(
            options->u.luks.key_secret, errp);
        if (!password) {
            return -1;
        }
    }

    luks = g_new0(QCryptoBlockLUKS, 1);
    block->opaque = luks;

    /* Read the entire LUKS header, minus the key material from
     * the underlying device */
    rv = readfunc(block, 0,
                  (uint8_t *)&luks->header,
                  sizeof(luks->header),
                  opaque,
                  errp);
    if (rv < 0) {
        ret = rv;
        goto fail;
    }

    /* The header is always stored in big-endian format, so
     * convert everything to native */
    be16_to_cpus(&luks->header.version);
    be32_to_cpus(&luks->header.payload_offset);
    be32_to_cpus(&luks->header.key_bytes);
    be32_to_cpus(&luks->header.master_key_iterations);

    for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) {
        be32_to_cpus(&luks->header.key_slots[i].active);
        be32_to_cpus(&luks->header.key_slots[i].iterations);
        be32_to_cpus(&luks->header.key_slots[i].key_offset);
        be32_to_cpus(&luks->header.key_slots[i].stripes);
    }

    if (memcmp(luks->header.magic, qcrypto_block_luks_magic,
               QCRYPTO_BLOCK_LUKS_MAGIC_LEN) != 0) {
        error_setg(errp, "Volume is not in LUKS format");
        ret = -EINVAL;
        goto fail;
    }
    if (luks->header.version != QCRYPTO_BLOCK_LUKS_VERSION) {
        error_setg(errp, "LUKS version %" PRIu32 " is not supported",
                   luks->header.version);
        ret = -ENOTSUP;
        goto fail;
    }

    /*
     * The cipher_mode header contains a string that we have
     * to further parse, of the format
     *
     *    <cipher-mode>-<iv-generator>[:<iv-hash>]
     *
     * eg  cbc-essiv:sha256, cbc-plain64
     */
    ivgen_name = strchr(luks->header.cipher_mode, '-');
    if (!ivgen_name) {
        ret = -EINVAL;
        error_setg(errp, "Unexpected cipher mode string format %s",
                   luks->header.cipher_mode);
        goto fail;
    }
    /* Split the in-place header string at the '-' separator. */
    *ivgen_name = '\0';
    ivgen_name++;

    ivhash_name = strchr(ivgen_name, ':');
    if (!ivhash_name) {
        ivhash = 0;
    } else {
        *ivhash_name = '\0';
        ivhash_name++;

        ivhash = qcrypto_block_luks_hash_name_lookup(ivhash_name,
                                                     &local_err);
        if (local_err) {
            ret = -ENOTSUP;
            error_propagate(errp, local_err);
            goto fail;
        }
    }

    ciphermode = qcrypto_block_luks_cipher_mode_lookup(luks->header.cipher_mode,
                                                       &local_err);
    if (local_err) {
        ret = -ENOTSUP;
        error_propagate(errp, local_err);
        goto fail;
    }

    cipheralg = qcrypto_block_luks_cipher_name_lookup(luks->header.cipher_name,
                                                      ciphermode,
                                                      luks->header.key_bytes,
                                                      &local_err);
    if (local_err) {
        ret = -ENOTSUP;
        error_propagate(errp, local_err);
        goto fail;
    }

    hash = qcrypto_block_luks_hash_name_lookup(luks->header.hash_spec,
                                               &local_err);
    if (local_err) {
        ret = -ENOTSUP;
        error_propagate(errp, local_err);
        goto fail;
    }

    ivalg = qcrypto_block_luks_ivgen_name_lookup(ivgen_name,
                                                 &local_err);
    if (local_err) {
        ret = -ENOTSUP;
        error_propagate(errp, local_err);
        goto fail;
    }

    if (ivalg == QCRYPTO_IVGEN_ALG_ESSIV) {
        if (!ivhash_name) {
            ret = -EINVAL;
            error_setg(errp, "Missing IV generator hash specification");
            goto fail;
        }
        ivcipheralg = qcrypto_block_luks_essiv_cipher(cipheralg,
                                                      ivhash,
                                                      &local_err);
        if (local_err) {
            ret = -ENOTSUP;
            error_propagate(errp, local_err);
            goto fail;
        }
    } else {
        /* Note we parsed the ivhash_name earlier in the cipher_mode
         * spec string even with plain/plain64 ivgens, but we
         * will ignore it, since it is irrelevant for these ivgens.
         * This is for compat with dm-crypt which will silently
         * ignore hash names with these ivgens rather than report
         * an error about the invalid usage */
        ivcipheralg = cipheralg;
    }

    if (!(flags & QCRYPTO_BLOCK_OPEN_NO_IO)) {
        /* Try to find which key slot our password is valid for
         * and unlock the master key from that slot.
         */
        if (qcrypto_block_luks_find_key(block,
                                        password,
                                        cipheralg, ciphermode,
                                        hash,
                                        ivalg,
                                        ivcipheralg,
                                        ivhash,
                                        &masterkey, &masterkeylen,
                                        readfunc, opaque,
                                        errp) < 0) {
            ret = -EACCES;
            goto fail;
        }

        /* We have a valid master key now, so can setup the
         * block device payload decryption objects
         */
        block->kdfhash = hash;
        block->niv = qcrypto_cipher_get_iv_len(cipheralg, ciphermode);
        block->ivgen = qcrypto_ivgen_new(ivalg, ivcipheralg,
                                         ivhash,
                                         masterkey, masterkeylen,
                                         errp);
        if (!block->ivgen) {
            ret = -ENOTSUP;
            goto fail;
        }

        block->cipher = qcrypto_cipher_new(cipheralg,
                                           ciphermode,
                                           masterkey, masterkeylen,
                                           errp);
        if (!block->cipher) {
            ret = -ENOTSUP;
            goto fail;
        }
    }

    block->payload_offset = luks->header.payload_offset *
        QCRYPTO_BLOCK_LUKS_SECTOR_SIZE;

    luks->cipher_alg = cipheralg;
    luks->cipher_mode = ciphermode;
    luks->ivgen_alg = ivalg;
    luks->ivgen_hash_alg = ivhash;
    luks->hash_alg = hash;

    g_free(masterkey);
    g_free(password);

    return 0;

 fail:
    g_free(masterkey);
    qcrypto_cipher_free(block->cipher);
    qcrypto_ivgen_free(block->ivgen);
    g_free(luks);
    g_free(password);
    return ret;
}
int es705_uart_dev_wdb(struct es705_priv *es705, const void *buf, int len) { /* At this point the command has been determined and is not part * of the data in the buffer. Its just data. Note that we donot * evaluate the contents of the data. It is up to the higher layers * to insure the the codes mode of operation matchs what is being * sent. */ int ret; u32 resp; u8 *dptr; u32 cmd = ES705_WDB_CMD << 16; dev_dbg(es705->dev, "%s(): len = 0x%08x\n", __func__, len); dptr = (char *)buf; cmd = cmd | (len & 0xFFFF); dev_dbg(es705->dev, "%s(): cmd = 0x%08x\n", __func__, cmd); cmd = cpu_to_be32(cmd); ret = es705_uart_write_then_read(es705, &cmd, sizeof(cmd), &resp, 0); if (ret < 0) { dev_err(es705->dev, "%s(): cmd write err=%hu\n", __func__, ret); goto wdb_err; } be32_to_cpus(&resp); dev_dbg(es705->dev, "%s(): resp = 0x%08x\n", __func__, resp); if ((resp & 0xffff0000) != (ES705_WDB_CMD << 16)) { dev_err(es705->dev, "%s(): invalid write data block 0x%08x\n", __func__, resp); goto wdb_err; } /* The API requires that the subsequent writes are to be * a byte stream (one byte per transport transaction) */ ret = es705_uart_write(es705, dptr, len); if (ret < 0) { dev_err(es705->dev, "%s(): wdb error =%d\n", __func__, ret); goto wdb_err; } /* One last ACK read */ ret = es705_uart_read(es705, &resp, 4); if (ret < 0) { dev_err(es705->dev, "%s(): last ack %d\n", __func__, ret); goto wdb_err; } if (resp & 0xff000000) { dev_err(es705->dev, "%s(): write data block error 0x%0x\n", __func__, resp); goto wdb_err; } dev_dbg(es705->dev, "%s(): len = %d\n", __func__, len); goto exit; wdb_err: len = -EIO; exit: return len; }
/* Read a voice-sense keyword data block (RDB) from the es705 over UART.
 *
 * Sends the RDB request for block @id, parses the size from the device's
 * response word, then reads the payload into @buf one byte per transport
 * transaction.  The number of bytes actually read is recorded in
 * es705->rdb_read_count (0 on failure).
 *
 * Returns 0 on success, -EIO on any failure.
 */
int es705_uart_dev_rdb(struct es705_priv *es705, void *buf, int id)
{
	u32 cmd;
	u32 resp;
	u8 *dptr;
	int ret;
	int size;
	int rdcnt = 0;

	dptr = (u8 *) buf;

	/* Read voice sense keyword data block request. */
	cmd = (ES705_RDB_CMD << 16) | (id & 0xFFFF);
	cmd = cpu_to_be32(cmd);	/* wire format is big-endian */
	ret = es705_uart_write(es705, (char *)&cmd, 4);
	if (ret < 0) {
		dev_err(es705->dev, "%s(): RDB cmd write err = %d\n",
			__func__, ret);
		goto rdb_err;
	}

	/* Refer to "ES705 Advanced API Guide" for details of interface */
	usleep_range(10000, 11000);
	ret = es705_uart_read(es705, (char *)&resp, 4);
	if (ret < 0) {
		dev_err(es705->dev, "%s(): error sending request = %d\n",
			__func__, ret);
		goto rdb_err;
	}

	/* Response word: command echo in the high 16 bits, payload size
	 * in the low 16 bits. */
	be32_to_cpus(&resp);
	size = resp & 0xffff;
	dev_dbg(es705->dev, "%s(): resp=0x%08x size=%d\n",
		__func__, resp, size);
	if ((resp & 0xffff0000) != (ES705_RDB_CMD << 16)) {
		dev_err(es705->dev,
			"%s(): invalid read v-s data block response = 0x%08x\n",
			__func__, resp);
		goto rdb_err;
	}
	if (size == 0) {
		dev_err(es705->dev, "%s(): read request return size of 0\n",
			__func__);
		goto rdb_err;
	}
	/* Clamp to the caller's buffer capacity. */
	if (size > PARSE_BUFFER_SIZE)
		size = PARSE_BUFFER_SIZE;

	/* The device requires one byte per transport transaction. */
	for (rdcnt = 0; rdcnt < size; rdcnt++, dptr++) {
		ret = es705_uart_read(es705, dptr, 1);
		if (ret < 0) {
			dev_err(es705->dev,
				"%s(): data block ed error %d bytes ret = %d\n",
				__func__, rdcnt, ret);
			goto rdb_err;
		}
	}

	es705->rdb_read_count = size;

	ret = 0;
	goto exit;

rdb_err:
	es705->rdb_read_count = 0;
	ret = -EIO;
exit:
	return ret;
}
/*
 * Open a VPC (VHD) image.
 *
 * Reads and validates the footer (fixed images keep it only at the end
 * of the file), derives the virtual size from the CHS geometry (falling
 * back to current_size at the geometry maximum), and, for dynamic
 * images, loads and validates the dynamic-disk header and the Block
 * Allocation Table.
 *
 * Returns 0 on success, a negative errno on failure; errp is set for
 * the error paths that have a user-facing message.
 */
static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVVPCState *s = bs->opaque;
    int i;
    VHDFooter *footer;
    VHDDynDiskHeader *dyndisk_header;
    uint8_t buf[HEADER_SIZE];
    uint32_t checksum;
    uint64_t computed_size;
    uint64_t pagetable_size;
    int disk_type = VHD_DYNAMIC;
    int ret;

    ret = bdrv_pread(bs->file->bs, 0, s->footer_buf, HEADER_SIZE);
    if (ret < 0) {
        goto fail;
    }

    footer = (VHDFooter *) s->footer_buf;
    if (strncmp(footer->creator, "conectix", 8)) {
        int64_t offset = bdrv_getlength(bs->file->bs);
        if (offset < 0) {
            ret = offset;
            goto fail;
        } else if (offset < HEADER_SIZE) {
            ret = -EINVAL;
            goto fail;
        }

        /* If a fixed disk, the footer is found only at the end of the file */
        ret = bdrv_pread(bs->file->bs, offset-HEADER_SIZE, s->footer_buf,
                         HEADER_SIZE);
        if (ret < 0) {
            goto fail;
        }
        if (strncmp(footer->creator, "conectix", 8)) {
            error_setg(errp, "invalid VPC image");
            ret = -EINVAL;
            goto fail;
        }
        disk_type = VHD_FIXED;
    }

    /* Checksum is computed over the footer with its checksum field
     * zeroed; mismatch is only warned about, not fatal. */
    checksum = be32_to_cpu(footer->checksum);
    footer->checksum = 0;
    if (vpc_checksum(s->footer_buf, HEADER_SIZE) != checksum)
        fprintf(stderr, "block-vpc: The header checksum of '%s' is "
            "incorrect.\n", bs->filename);

    /* Write 'checksum' back to footer, or else will leave it with zero. */
    footer->checksum = cpu_to_be32(checksum);

    // The visible size of a image in Virtual PC depends on the geometry
    // rather than on the size stored in the footer (the size in the footer
    // is too large usually)
    bs->total_sectors = (int64_t)
        be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl;

    /* Images that have exactly the maximum geometry are probably bigger and
     * would be truncated if we adhered to the geometry for them. Rely on
     * footer->current_size for them. */
    if (bs->total_sectors == VHD_MAX_GEOMETRY) {
        bs->total_sectors = be64_to_cpu(footer->current_size) /
                            BDRV_SECTOR_SIZE;
    }

    /* Allow a maximum disk size of approximately 2 TB */
    if (bs->total_sectors >= VHD_MAX_SECTORS) {
        ret = -EFBIG;
        goto fail;
    }

    if (disk_type == VHD_DYNAMIC) {
        ret = bdrv_pread(bs->file->bs, be64_to_cpu(footer->data_offset), buf,
                         HEADER_SIZE);
        if (ret < 0) {
            goto fail;
        }

        dyndisk_header = (VHDDynDiskHeader *) buf;

        if (strncmp(dyndisk_header->magic, "cxsparse", 8)) {
            ret = -EINVAL;
            goto fail;
        }

        s->block_size = be32_to_cpu(dyndisk_header->block_size);
        if (!is_power_of_2(s->block_size) || s->block_size < BDRV_SECTOR_SIZE) {
            error_setg(errp, "Invalid block size %" PRIu32, s->block_size);
            ret = -EINVAL;
            goto fail;
        }
        /* One bitmap bit per 512-byte sector, rounded up to 512 bytes. */
        s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511;

        s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries);

        /* Sanity-check the BAT: entry count must cover the disk without
         * overflowing, and the implied size must not shrink the disk. */
        if ((bs->total_sectors * 512) / s->block_size > 0xffffffffU) {
            ret = -EINVAL;
            goto fail;
        }
        if (s->max_table_entries > (VHD_MAX_SECTORS * 512) / s->block_size) {
            ret = -EINVAL;
            goto fail;
        }

        computed_size = (uint64_t) s->max_table_entries * s->block_size;
        if (computed_size < bs->total_sectors * 512) {
            ret = -EINVAL;
            goto fail;
        }

        /* Each BAT entry is 4 bytes; keep the allocation size within both
         * size_t and int range before multiplying. */
        if (s->max_table_entries > SIZE_MAX / 4 ||
            s->max_table_entries > (int) INT_MAX / 4) {
            error_setg(errp, "Max Table Entries too large (%" PRId32 ")",
                        s->max_table_entries);
            ret = -EINVAL;
            goto fail;
        }

        pagetable_size = (uint64_t) s->max_table_entries * 4;

        s->pagetable = qemu_try_blockalign(bs->file->bs, pagetable_size);
        if (s->pagetable == NULL) {
            ret = -ENOMEM;
            goto fail;
        }

        s->bat_offset = be64_to_cpu(dyndisk_header->table_offset);

        ret = bdrv_pread(bs->file->bs, s->bat_offset, s->pagetable,
                         pagetable_size);
        if (ret < 0) {
            goto fail;
        }

        s->free_data_block_offset =
            ROUND_UP(s->bat_offset + pagetable_size, 512);

        /* Scan the BAT (converting each entry to host endianness) to find
         * where the next data block would be allocated. */
        for (i = 0; i < s->max_table_entries; i++) {
            be32_to_cpus(&s->pagetable[i]);
            if (s->pagetable[i] != 0xFFFFFFFF) {
                int64_t next = (512 * (int64_t) s->pagetable[i]) +
                    s->bitmap_size + s->block_size;
                if (next > s->free_data_block_offset) {
                    s->free_data_block_offset = next;
                }
            }
        }

        if (s->free_data_block_offset > bdrv_getlength(bs->file->bs)) {
            error_setg(errp, "block-vpc: free_data_block_offset points after "
                             "the end of file. The image has been truncated.");
            ret = -EINVAL;
            goto fail;
        }

        s->last_bitmap_offset = (int64_t) -1;

#ifdef CACHE
        s->pageentry_u8 = g_malloc(512);
        s->pageentry_u32 = s->pageentry_u8;
        s->pageentry_u16 = s->pageentry_u8;
        s->last_pagetable = -1;
#endif
    }

    qemu_co_mutex_init(&s->lock);

    /* Disable migration when VHD images are used */
    error_setg(&s->migration_blocker, "The vpc format used by node '%s' "
               "does not support live migration",
               bdrv_get_device_or_node_name(bs));
    migrate_add_blocker(s->migration_blocker);

    return 0;

fail:
    qemu_vfree(s->pagetable);
#ifdef CACHE
    g_free(s->pageentry_u8);
#endif
    return ret;
}
int es705_uart_dev_rdb(struct es705_priv *es705, void *buf, int id) { u32 cmd; u32 resp; u8 *dptr; int ret; int size; int rdcnt = 0; #if defined(CONFIG_MQ100_SENSOR_HUB) u8 pad; /* Take Mutex for the duration of this UART transaction. */ mutex_lock(&es705_priv.uart_transaction_mutex); #endif dptr = (u8 *)buf; /* Read voice sense keyword data block request. */ cmd = (ES705_RDB_CMD << 16) | (id & 0xFFFF); cmd = cpu_to_be32(cmd); ret = es705_uart_write(es705, (char *)&cmd, 4); if (ret < 0) { dev_err(es705->dev, "%s(): RDB cmd write err = %d\n", __func__, ret); goto rdb_err; } /* Refer to "ES705 Advanced API Guide" for details of interface */ usleep_range(10000, 10000); ret = es705_uart_read(es705, (char *)&resp, 4); if (ret < 0) { dev_err(es705->dev, "%s(): error sending request = %d\n", __func__, ret); goto rdb_err; } be32_to_cpus(&resp); size = resp & 0xffff; dev_dbg(es705->dev, "%s(): resp=0x%08x size=%d\n", __func__, resp, size); if ((resp & 0xffff0000) != (ES705_RDB_CMD << 16)) { dev_err(es705->dev, "%s(): invalid read v-s data block response = 0x%08x\n", __func__, resp); goto rdb_err; } if (size == 0) { dev_err(es705->dev, "%s(): read request return size of 0\n", __func__); goto rdb_err; } if (size > PARSE_BUFFER_SIZE) size = PARSE_BUFFER_SIZE; for (rdcnt = 0; rdcnt < size; rdcnt++, dptr++) { ret = es705_uart_read(es705, dptr, 1); if (ret < 0) { dev_err(es705->dev, "%s(): data block ed error %d bytes ret = %d\n", __func__, rdcnt, ret); goto rdb_err; } } es705->rdb_read_count = size; #if defined(CONFIG_MQ100_SENSOR_HUB) size = PAD4(size); dev_dbg(es705->dev, "%s: Discarding %d pad bytes\n", __func__, size); ret = 0; while ((size > 0) && (ret >= 0)) { ret = es705_uart_read(es705, &pad, 1); size--; } #endif ret = 0; goto exit; rdb_err: es705->rdb_read_count = 0; ret = -EIO; exit: #if defined(CONFIG_MQ100_SENSOR_HUB) /* release UART transaction mutex */ mutex_unlock(&es705_priv.uart_transaction_mutex); #endif return ret; }
int nbd_receive_negotiate(QIOChannel *ioc, const char *name, QCryptoTLSCreds *tlscreds, const char *hostname, QIOChannel **outioc, NBDExportInfo *info, Error **errp) { char buf[256]; uint64_t magic; int rc; bool zeroes = true; trace_nbd_receive_negotiate(tlscreds, hostname ? hostname : "<null>"); rc = -EINVAL; if (outioc) { *outioc = NULL; } if (tlscreds && !outioc) { error_setg(errp, "Output I/O channel required for TLS"); goto fail; } if (nbd_read(ioc, buf, 8, errp) < 0) { error_prepend(errp, "Failed to read data"); goto fail; } buf[8] = '\0'; if (strlen(buf) == 0) { error_setg(errp, "Server connection closed unexpectedly"); goto fail; } magic = ldq_be_p(buf); trace_nbd_receive_negotiate_magic(magic); if (memcmp(buf, "NBDMAGIC", 8) != 0) { error_setg(errp, "Invalid magic received"); goto fail; } if (nbd_read(ioc, &magic, sizeof(magic), errp) < 0) { error_prepend(errp, "Failed to read magic"); goto fail; } magic = be64_to_cpu(magic); trace_nbd_receive_negotiate_magic(magic); if (magic == NBD_OPTS_MAGIC) { uint32_t clientflags = 0; uint16_t globalflags; bool fixedNewStyle = false; if (nbd_read(ioc, &globalflags, sizeof(globalflags), errp) < 0) { error_prepend(errp, "Failed to read server flags"); goto fail; } globalflags = be16_to_cpu(globalflags); trace_nbd_receive_negotiate_server_flags(globalflags); if (globalflags & NBD_FLAG_FIXED_NEWSTYLE) { fixedNewStyle = true; clientflags |= NBD_FLAG_C_FIXED_NEWSTYLE; } if (globalflags & NBD_FLAG_NO_ZEROES) { zeroes = false; clientflags |= NBD_FLAG_C_NO_ZEROES; } /* client requested flags */ clientflags = cpu_to_be32(clientflags); if (nbd_write(ioc, &clientflags, sizeof(clientflags), errp) < 0) { error_prepend(errp, "Failed to send clientflags field"); goto fail; } if (tlscreds) { if (fixedNewStyle) { *outioc = nbd_receive_starttls(ioc, tlscreds, hostname, errp); if (!*outioc) { goto fail; } ioc = *outioc; } else { error_setg(errp, "Server does not support STARTTLS"); goto fail; } } if (!name) { 
trace_nbd_receive_negotiate_default_name(); name = ""; } if (fixedNewStyle) { int result; /* Try NBD_OPT_GO first - if it works, we are done (it * also gives us a good message if the server requires * TLS). If it is not available, fall back to * NBD_OPT_LIST for nicer error messages about a missing * export, then use NBD_OPT_EXPORT_NAME. */ result = nbd_opt_go(ioc, name, info, errp); if (result < 0) { goto fail; } if (result > 0) { return 0; } /* Check our desired export is present in the * server export list. Since NBD_OPT_EXPORT_NAME * cannot return an error message, running this * query gives us better error reporting if the * export name is not available. */ if (nbd_receive_query_exports(ioc, name, errp) < 0) { goto fail; } } /* write the export name request */ if (nbd_send_option_request(ioc, NBD_OPT_EXPORT_NAME, -1, name, errp) < 0) { goto fail; } /* Read the response */ if (nbd_read(ioc, &info->size, sizeof(info->size), errp) < 0) { error_prepend(errp, "Failed to read export length"); goto fail; } be64_to_cpus(&info->size); if (nbd_read(ioc, &info->flags, sizeof(info->flags), errp) < 0) { error_prepend(errp, "Failed to read export flags"); goto fail; } be16_to_cpus(&info->flags); } else if (magic == NBD_CLIENT_MAGIC) { uint32_t oldflags; if (name) { error_setg(errp, "Server does not support export names"); goto fail; } if (tlscreds) { error_setg(errp, "Server does not support STARTTLS"); goto fail; } if (nbd_read(ioc, &info->size, sizeof(info->size), errp) < 0) { error_prepend(errp, "Failed to read export length"); goto fail; } be64_to_cpus(&info->size); if (nbd_read(ioc, &oldflags, sizeof(oldflags), errp) < 0) { error_prepend(errp, "Failed to read export flags"); goto fail; } be32_to_cpus(&oldflags); if (oldflags & ~0xffff) { error_setg(errp, "Unexpected export flags %0x" PRIx32, oldflags); goto fail; } info->flags = oldflags; } else { error_setg(errp, "Bad magic received"); goto fail; } trace_nbd_receive_negotiate_size_flags(info->size, info->flags); if 
(zeroes && nbd_drop(ioc, 124, errp) < 0) { error_prepend(errp, "Failed to read reserved block"); goto fail; } rc = 0; fail: return rc; }
/* Returns -1 if NBD_OPT_GO proves the export @wantname cannot be * used, 0 if NBD_OPT_GO is unsupported (fall back to NBD_OPT_LIST and * NBD_OPT_EXPORT_NAME in that case), and > 0 if the export is good to * go (with @info populated). */ static int nbd_opt_go(QIOChannel *ioc, const char *wantname, NBDExportInfo *info, Error **errp) { nbd_opt_reply reply; uint32_t len = strlen(wantname); uint16_t type; int error; char *buf; /* The protocol requires that the server send NBD_INFO_EXPORT with * a non-zero flags (at least NBD_FLAG_HAS_FLAGS must be set); so * flags still 0 is a witness of a broken server. */ info->flags = 0; trace_nbd_opt_go_start(wantname); buf = g_malloc(4 + len + 2 + 2 * info->request_sizes + 1); stl_be_p(buf, len); memcpy(buf + 4, wantname, len); /* At most one request, everything else up to server */ stw_be_p(buf + 4 + len, info->request_sizes); if (info->request_sizes) { stw_be_p(buf + 4 + len + 2, NBD_INFO_BLOCK_SIZE); } error = nbd_send_option_request(ioc, NBD_OPT_GO, 4 + len + 2 + 2 * info->request_sizes, buf, errp); g_free(buf); if (error < 0) { return -1; } while (1) { if (nbd_receive_option_reply(ioc, NBD_OPT_GO, &reply, errp) < 0) { return -1; } error = nbd_handle_reply_err(ioc, &reply, errp); if (error <= 0) { return error; } len = reply.length; if (reply.type == NBD_REP_ACK) { /* Server is done sending info and moved into transmission phase, but make sure it sent flags */ if (len) { error_setg(errp, "server sent invalid NBD_REP_ACK"); return -1; } if (!info->flags) { error_setg(errp, "broken server omitted NBD_INFO_EXPORT"); return -1; } trace_nbd_opt_go_success(); return 1; } if (reply.type != NBD_REP_INFO) { error_setg(errp, "unexpected reply type %" PRIx32 " (%s), expected %x", reply.type, nbd_rep_lookup(reply.type), NBD_REP_INFO); nbd_send_opt_abort(ioc); return -1; } if (len < sizeof(type)) { error_setg(errp, "NBD_REP_INFO length %" PRIu32 " is too short", len); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &type, 
sizeof(type), errp) < 0) { error_prepend(errp, "failed to read info type"); nbd_send_opt_abort(ioc); return -1; } len -= sizeof(type); be16_to_cpus(&type); switch (type) { case NBD_INFO_EXPORT: if (len != sizeof(info->size) + sizeof(info->flags)) { error_setg(errp, "remaining export info len %" PRIu32 " is unexpected size", len); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &info->size, sizeof(info->size), errp) < 0) { error_prepend(errp, "failed to read info size"); nbd_send_opt_abort(ioc); return -1; } be64_to_cpus(&info->size); if (nbd_read(ioc, &info->flags, sizeof(info->flags), errp) < 0) { error_prepend(errp, "failed to read info flags"); nbd_send_opt_abort(ioc); return -1; } be16_to_cpus(&info->flags); trace_nbd_receive_negotiate_size_flags(info->size, info->flags); break; case NBD_INFO_BLOCK_SIZE: if (len != sizeof(info->min_block) * 3) { error_setg(errp, "remaining export info len %" PRIu32 " is unexpected size", len); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &info->min_block, sizeof(info->min_block), errp) < 0) { error_prepend(errp, "failed to read info minimum block size"); nbd_send_opt_abort(ioc); return -1; } be32_to_cpus(&info->min_block); if (!is_power_of_2(info->min_block)) { error_setg(errp, "server minimum block size %" PRId32 "is not a power of two", info->min_block); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &info->opt_block, sizeof(info->opt_block), errp) < 0) { error_prepend(errp, "failed to read info preferred block size"); nbd_send_opt_abort(ioc); return -1; } be32_to_cpus(&info->opt_block); if (!is_power_of_2(info->opt_block) || info->opt_block < info->min_block) { error_setg(errp, "server preferred block size %" PRId32 "is not valid", info->opt_block); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &info->max_block, sizeof(info->max_block), errp) < 0) { error_prepend(errp, "failed to read info maximum block size"); nbd_send_opt_abort(ioc); return -1; } be32_to_cpus(&info->max_block); 
trace_nbd_opt_go_info_block_size(info->min_block, info->opt_block, info->max_block); break; default: trace_nbd_opt_go_info_unknown(type, nbd_info_lookup(type)); if (nbd_drop(ioc, len, errp) < 0) { error_prepend(errp, "Failed to read info payload"); nbd_send_opt_abort(ioc); return -1; } break; } } }