static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
{
    uint32_t romask = ldl_le_p(&s->romask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    uint32_t oldval = ldl_le_p(&s->mmior[addr]);

    stl_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}
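/*
 * A minimal, self-contained sketch (not part of the original source) of the
 * read-only / write-1-to-clear masking that amdvi_writel above applies to
 * its MMIO registers. mask_write() and the bit assignments are hypothetical
 * names chosen for illustration; the arithmetic mirrors the expression in
 * amdvi_writel.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t mask_write(uint32_t oldval, uint32_t val,
                           uint32_t romask, uint32_t w1cmask)
{
    /* Keep RO bits from oldval, take writable bits from val,
     * then clear any W1C bit the guest wrote as 1. */
    return ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask);
}

int main(void)
{
    /* Assume bit 0 is read-only and bit 1 is write-1-to-clear. */
    uint32_t romask = 0x1, w1cmask = 0x2;

    /* Writing all-ones cannot flip the RO bit... */
    assert(mask_write(0x0, 0xffffffff, romask, w1cmask) == 0xfffffffc);
    /* ...and writing 1 to the W1C bit clears it. */
    assert(mask_write(0x2, 0x2, romask, w1cmask) == 0x0);
    return 0;
}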
static void read_partition(uint8_t *p, struct partition_record *r)
{
    r->bootable = p[0];
    r->start_head = p[1];
    r->start_cylinder = p[3] | ((p[2] << 2) & 0x0300);
    r->start_sector = p[2] & 0x3f;
    r->system = p[4];
    r->end_head = p[5];
    r->end_cylinder = p[7] | ((p[6] << 2) & 0x300);
    r->end_sector = p[6] & 0x3f;

    r->start_sector_abs = ldl_le_p(p + 8);
    r->nb_sectors_abs = ldl_le_p(p + 12);
}
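/*
 * A hedged sketch (the sample entry bytes are made up for illustration) of
 * the MBR partition-entry decoding done by read_partition above: the 16-byte
 * entry packs cylinder bits 8-9 into the top two bits of the sector byte,
 * while the absolute LBA start and sector count are plain little-endian
 * 32-bit fields, hence the ldl_le_p() calls.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical entry: bootable, type 0x83, LBA 2048, 409600 sectors. */
    uint8_t entry[16] = {
        0x80,                   /* bootable flag */
        0x20, 0x21, 0x00,       /* start head, sector+cyl-high, cyl-low */
        0x83,                   /* system type */
        0xfe, 0xff, 0xff,       /* end head, sector+cyl-high, cyl-low */
        0x00, 0x08, 0x00, 0x00, /* LBA start = 2048, little-endian */
        0x00, 0x40, 0x06, 0x00, /* sector count = 409600, little-endian */
    };

    uint32_t start_sector = entry[2] & 0x3f;                   /* low 6 bits */
    uint32_t start_cyl = entry[3] | ((entry[2] << 2) & 0x300); /* 10 bits */
    uint32_t lba = (uint32_t)entry[8] | ((uint32_t)entry[9] << 8) |
                   ((uint32_t)entry[10] << 16) | ((uint32_t)entry[11] << 24);

    assert(start_sector == 33);
    assert(start_cyl == 0);
    assert(lba == 2048);
    return 0;
}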
static void v9fs_req_recv(P9Req *req, uint8_t id)
{
    QVirtIO9P *v9p = req->v9p;
    P9Hdr hdr;
    int i;

    for (i = 0; i < 10; i++) {
        qvirtio_wait_queue_isr(v9p->dev, v9p->vq, 1000 * 1000);

        v9fs_memread(req, &hdr, 7);
        hdr.size = ldl_le_p(&hdr.size);
        hdr.tag = lduw_le_p(&hdr.tag);
        if (hdr.size >= 7) {
            break;
        }
        v9fs_memrewind(req, 7);
    }
    g_assert_cmpint(hdr.size, >=, 7);
    g_assert_cmpint(hdr.size, <=, P9_MAX_SIZE);
    g_assert_cmpint(hdr.tag, ==, req->tag);

    if (hdr.id != id) {
        g_printerr("Received response %d (%s) instead of %d (%s)\n",
                   hdr.id, rmessage_name(hdr.id), id, rmessage_name(id));
        if (hdr.id == P9_RLERROR) {
            uint32_t err;
            v9fs_uint32_read(req, &err);
            g_printerr("Rlerror has errno %d (%s)\n", err, strerror(err));
        }
    }
    g_assert_cmpint(hdr.id, ==, id);
}
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (l < 4 || !IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
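/*
 * A self-contained sketch of what the ldl_le_p()/ldl_be_p() accessors used
 * above provide: a 32-bit load with a fixed byte order, regardless of host
 * endianness or alignment. This is an illustration only (my_ldl_*_p are
 * hypothetical names), not the QEMU implementation, which lives in
 * include/qemu/bswap.h.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t my_ldl_le_p(const void *p)
{
    const uint8_t *b = p;
    /* Assemble byte by byte so the result does not depend on host order. */
    return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
           ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static uint32_t my_ldl_be_p(const void *p)
{
    const uint8_t *b = p;
    return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
           ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

int main(void)
{
    uint8_t buf[5] = { 0x00, 0x78, 0x56, 0x34, 0x12 };

    /* Same four (unaligned) bytes, two interpretations. */
    assert(my_ldl_le_p(buf + 1) == 0x12345678);
    assert(my_ldl_be_p(buf + 1) == 0x78563412);
    return 0;
}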
static int
virtio_crypto_cipher_session_helper(VirtIODevice *vdev,
           CryptoDevBackendSymSessionInfo *info,
           struct virtio_crypto_cipher_session_para *cipher_para,
           struct iovec **iov, unsigned int *out_num)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    unsigned int num = *out_num;

    info->cipher_alg = ldl_le_p(&cipher_para->algo);
    info->key_len = ldl_le_p(&cipher_para->keylen);
    info->direction = ldl_le_p(&cipher_para->op);
    DPRINTF("cipher_alg=%" PRIu32 ", info->direction=%" PRIu32 "\n",
             info->cipher_alg, info->direction);

    if (info->key_len > vcrypto->conf.max_cipher_key_len) {
        error_report("virtio-crypto length of cipher key is too big: %u",
                     info->key_len);
        return -VIRTIO_CRYPTO_ERR;
    }
    /* Get cipher key */
    if (info->key_len > 0) {
        size_t s;
        DPRINTF("keylen=%" PRIu32 "\n", info->key_len);

        info->cipher_key = g_malloc(info->key_len);
        s = iov_to_buf(*iov, num, 0, info->cipher_key, info->key_len);
        if (unlikely(s != info->key_len)) {
            virtio_error(vdev, "virtio-crypto cipher key incorrect");
            return -EFAULT;
        }
        iov_discard_front(iov, &num, info->key_len);
        *out_num = num;
    }

    return 0;
}
static void flash_write(void *opaque, hwaddr offset, uint64_t value,
        unsigned int size)
{
    NRF51NVMState *s = NRF51_NVM(opaque);

    if (s->config & NRF51_NVMC_CONFIG_WEN) {
        uint32_t oldval;

        assert(offset + size <= s->flash_size);

        /* NOR Flash only allows bits to be flipped from 1's to 0's on write */
        oldval = ldl_le_p(s->storage + offset);
        oldval &= value;
        stl_le_p(s->storage + offset, oldval);

        memory_region_flush_rom_device(&s->flash, offset, size);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                "%s: Flash write 0x%" HWADDR_PRIx
                " while flash not writable.\n",
                __func__, offset);
    }
}
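/*
 * A minimal sketch (illustrative only; nor_write() is a hypothetical helper)
 * of the NOR-flash write rule that flash_write above implements with
 * "oldval &= value": a program operation can only clear bits, so the new
 * cell value is old AND new, and only an erase restores 1s.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t nor_write(uint32_t cell, uint32_t value)
{
    /* Bits already at 0 stay 0, whatever the guest writes. */
    return cell & value;
}

int main(void)
{
    uint32_t cell = 0xffffffff;           /* erased state: all ones */

    cell = nor_write(cell, 0xdeadbeef);   /* first program takes full effect */
    assert(cell == 0xdeadbeef);

    cell = nor_write(cell, 0xffff0000);   /* cannot flip 0 bits back to 1 */
    assert(cell == 0xdead0000);
    return 0;
}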
static int
virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
               struct virtio_crypto_sym_data_req *req,
               CryptoDevBackendSymOpInfo **sym_op_info,
               struct iovec *iov, unsigned int out_num)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    uint32_t op_type;
    CryptoDevBackendSymOpInfo *op_info;

    op_type = ldl_le_p(&req->op_type);

    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
        op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
                                              NULL, iov, out_num);
        if (!op_info) {
            return -EFAULT;
        }
        op_info->op_type = op_type;
    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
        op_info = virtio_crypto_sym_op_helper(vdev, NULL,
                                              &req->u.chain.para,
                                              iov, out_num);
        if (!op_info) {
            return -EFAULT;
        }
        op_info->op_type = op_type;
    } else {
        /* VIRTIO_CRYPTO_SYM_OP_NONE */
        error_report("virtio-crypto unsupported cipher type");
        return -VIRTIO_CRYPTO_NOTSUPP;
    }

    *sym_op_info = op_info;

    return 0;
}
static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
{
    return ldl_le_p(&s->mmior[addr]);
}
static void pfifo_run_pusher(NV2AState *d)
{
    uint32_t *push0 = &d->pfifo.regs[NV_PFIFO_CACHE1_PUSH0];
    uint32_t *push1 = &d->pfifo.regs[NV_PFIFO_CACHE1_PUSH1];
    uint32_t *dma_subroutine = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_SUBROUTINE];
    uint32_t *dma_state = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_STATE];
    uint32_t *dma_push = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_PUSH];
    uint32_t *dma_get = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_GET];
    uint32_t *dma_put = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_PUT];
    uint32_t *dma_dcount = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_DCOUNT];

    uint32_t *status = &d->pfifo.regs[NV_PFIFO_CACHE1_STATUS];
    uint32_t *get_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_GET];
    uint32_t *put_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_PUT];

    if (!GET_MASK(*push0, NV_PFIFO_CACHE1_PUSH0_ACCESS)) return;
    if (!GET_MASK(*dma_push, NV_PFIFO_CACHE1_DMA_PUSH_ACCESS)) return;

    /* suspended */
    if (GET_MASK(*dma_push, NV_PFIFO_CACHE1_DMA_PUSH_STATUS)) return;

    // TODO: should we become busy here??
    // NV_PFIFO_CACHE1_DMA_PUSH_STATE _BUSY

    unsigned int channel_id = GET_MASK(*push1, NV_PFIFO_CACHE1_PUSH1_CHID);

    /* Channel running DMA mode */
    uint32_t channel_modes = d->pfifo.regs[NV_PFIFO_MODE];
    assert(channel_modes & (1 << channel_id));

    assert(GET_MASK(*push1, NV_PFIFO_CACHE1_PUSH1_MODE)
            == NV_PFIFO_CACHE1_PUSH1_MODE_DMA);

    /* We're running so there should be no pending errors... */
    assert(GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_ERROR)
            == NV_PFIFO_CACHE1_DMA_STATE_ERROR_NONE);

    hwaddr dma_instance =
        GET_MASK(d->pfifo.regs[NV_PFIFO_CACHE1_DMA_INSTANCE],
                 NV_PFIFO_CACHE1_DMA_INSTANCE_ADDRESS) << 4;

    hwaddr dma_len;
    uint8_t *dma = nv_dma_map(d, dma_instance, &dma_len);

    while (true) {
        uint32_t dma_get_v = *dma_get;
        uint32_t dma_put_v = *dma_put;
        if (dma_get_v == dma_put_v) break;

        if (dma_get_v >= dma_len) {
            assert(false);
            SET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_ERROR,
                     NV_PFIFO_CACHE1_DMA_STATE_ERROR_PROTECTION);
            break;
        }

        uint32_t word = ldl_le_p((uint32_t*)(dma + dma_get_v));
        dma_get_v += 4;

        uint32_t method_type =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD_TYPE);
        uint32_t method_subchannel =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_SUBCHANNEL);
        uint32_t method =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD) << 2;
        uint32_t method_count =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD_COUNT);

        uint32_t subroutine_state =
            GET_MASK(*dma_subroutine, NV_PFIFO_CACHE1_DMA_SUBROUTINE_STATE);

        if (method_count) {
            /* full */
            if (*status & NV_PFIFO_CACHE1_STATUS_HIGH_MARK) return;

            /* data word of methods command */
            d->pfifo.regs[NV_PFIFO_CACHE1_DMA_DATA_SHADOW] = word;

            uint32_t put = *put_reg;
            uint32_t get = *get_reg;

            assert((method & 3) == 0);
            uint32_t method_entry = 0;
            SET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_ADDRESS,
                     method >> 2);
            SET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_TYPE, method_type);
            SET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_SUBCHANNEL,
                     method_subchannel);

            // NV2A_DPRINTF("push %d 0x%x 0x%x - subch %d\n",
            //              put/4, method_entry, word, method_subchannel);

            assert(put < 128*4 && (put%4) == 0);
            d->pfifo.regs[NV_PFIFO_CACHE1_METHOD + put*2] = method_entry;
            d->pfifo.regs[NV_PFIFO_CACHE1_DATA + put*2] = word;

            uint32_t new_put = (put+4) & 0x1fc;
            *put_reg = new_put;
            if (new_put == get) {
                // set high mark
                *status |= NV_PFIFO_CACHE1_STATUS_HIGH_MARK;
            }
            if (*status & NV_PFIFO_CACHE1_STATUS_LOW_MARK) {
                // unset low mark
                *status &= ~NV_PFIFO_CACHE1_STATUS_LOW_MARK;
                // signal puller
                qemu_cond_signal(&d->pfifo.puller_cond);
            }

            if (method_type == NV_PFIFO_CACHE1_DMA_STATE_METHOD_TYPE_INC) {
                SET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD,
                         (method + 4) >> 2);
            }
            SET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD_COUNT,
                     method_count - 1);
            (*dma_dcount)++;
        } else {
static int64_t
virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
               struct virtio_crypto_sym_create_session_req *sess_req,
               uint32_t queue_id,
               uint32_t opcode,
               struct iovec *iov, unsigned int out_num)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    CryptoDevBackendSymSessionInfo info;
    int64_t session_id;
    int queue_index;
    uint32_t op_type;
    Error *local_err = NULL;
    int ret;

    memset(&info, 0, sizeof(info));
    op_type = ldl_le_p(&sess_req->op_type);
    info.op_type = op_type;
    info.op_code = opcode;

    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
        ret = virtio_crypto_cipher_session_helper(vdev, &info,
                           &sess_req->u.cipher.para,
                           &iov, &out_num);
        if (ret < 0) {
            goto err;
        }
    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
        size_t s;

        /* cipher part */
        ret = virtio_crypto_cipher_session_helper(vdev, &info,
                           &sess_req->u.chain.para.cipher_param,
                           &iov, &out_num);
        if (ret < 0) {
            goto err;
        }
        /* hash part */
        info.alg_chain_order = ldl_le_p(
                                     &sess_req->u.chain.para.alg_chain_order);
        info.add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
        info.hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
        if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
            info.hash_alg = ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
            info.auth_key_len = ldl_le_p(
                         &sess_req->u.chain.para.u.mac_param.auth_key_len);
            info.hash_result_len = ldl_le_p(
                         &sess_req->u.chain.para.u.mac_param.hash_result_len);
            if (info.auth_key_len > vcrypto->conf.max_auth_key_len) {
                error_report("virtio-crypto length of auth key is too big: %u",
                             info.auth_key_len);
                ret = -VIRTIO_CRYPTO_ERR;
                goto err;
            }
            /* get auth key */
            if (info.auth_key_len > 0) {
                DPRINTF("auth_keylen=%" PRIu32 "\n", info.auth_key_len);
                info.auth_key = g_malloc(info.auth_key_len);
                s = iov_to_buf(iov, out_num, 0, info.auth_key,
                               info.auth_key_len);
                if (unlikely(s != info.auth_key_len)) {
                    virtio_error(vdev,
                          "virtio-crypto authenticated key incorrect");
                    ret = -EFAULT;
                    goto err;
                }
                iov_discard_front(&iov, &out_num, info.auth_key_len);
            }
        } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
            info.hash_alg = ldl_le_p(
                             &sess_req->u.chain.para.u.hash_param.algo);
            info.hash_result_len = ldl_le_p(
                        &sess_req->u.chain.para.u.hash_param.hash_result_len);
        } else {
            /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
            error_report("unsupported hash mode");
            ret = -VIRTIO_CRYPTO_NOTSUPP;
            goto err;
        }
    } else {
        /* VIRTIO_CRYPTO_SYM_OP_NONE */
        error_report("unsupported cipher op_type: VIRTIO_CRYPTO_SYM_OP_NONE");
        ret = -VIRTIO_CRYPTO_NOTSUPP;
        goto err;
    }

    queue_index = virtio_crypto_vq2q(queue_id);
    session_id = cryptodev_backend_sym_create_session(
                                     vcrypto->cryptodev,
                                     &info, queue_index, &local_err);
    if (session_id >= 0) {
        DPRINTF("create session_id=%" PRIu64 " successfully\n",
                session_id);
        ret = session_id;
    } else {
        if (local_err) {
            error_report_err(local_err);
        }
        ret = -VIRTIO_CRYPTO_ERR;
    }

err:
    g_free(info.cipher_key);
    g_free(info.auth_key);
    return ret;
}
static int
virtio_crypto_handle_request(VirtIOCryptoReq *request)
{
    VirtIOCrypto *vcrypto = request->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    VirtQueueElement *elem = &request->elem;
    int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
    struct virtio_crypto_op_data_req req;
    int ret;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t opcode;
    uint8_t status = VIRTIO_CRYPTO_ERR;
    uint64_t session_id;
    CryptoDevBackendSymOpInfo *sym_op_info = NULL;
    Error *local_err = NULL;

    if (elem->out_num < 1 || elem->in_num < 1) {
        virtio_error(vdev, "virtio-crypto dataq missing headers");
        return -1;
    }

    out_num = elem->out_num;
    out_iov = elem->out_sg;
    in_num = elem->in_num;
    in_iov = elem->in_sg;
    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
                != sizeof(req))) {
        virtio_error(vdev, "virtio-crypto request outhdr too short");
        return -1;
    }
    iov_discard_front(&out_iov, &out_num, sizeof(req));

    if (in_iov[in_num - 1].iov_len <
            sizeof(struct virtio_crypto_inhdr)) {
        virtio_error(vdev, "virtio-crypto request inhdr too short");
        return -1;
    }
    /* We always touch the last byte, so just see how big in_iov is. */
    request->in_len = iov_size(in_iov, in_num);
    request->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_crypto_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));

    /*
     * The length of the operation result, including dest_data
     * and the digest_result if it exists.
     */
    request->in_num = in_num;
    request->in_iov = in_iov;

    opcode = ldl_le_p(&req.header.opcode);
    session_id = ldq_le_p(&req.header.session_id);

    switch (opcode) {
    case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
    case VIRTIO_CRYPTO_CIPHER_DECRYPT:
        ret = virtio_crypto_handle_sym_req(vcrypto,
                         &req.u.sym_req,
                         &sym_op_info,
                         out_iov, out_num);
        /* Serious errors, need to reset virtio crypto device */
        if (ret == -EFAULT) {
            return -1;
        } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
            virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
            virtio_crypto_free_request(request);
        } else {
            sym_op_info->session_id = session_id;

            /* Set request's parameter */
            request->flags = CRYPTODEV_BACKEND_ALG_SYM;
            request->u.sym_op_info = sym_op_info;
            ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
                                    request, queue_index, &local_err);
            if (ret < 0) {
                status = -ret;
                if (local_err) {
                    error_report_err(local_err);
                }
            } else { /* ret == VIRTIO_CRYPTO_OK */
                status = ret;
            }
            virtio_crypto_req_complete(request, status);
            virtio_crypto_free_request(request);
        }
        break;
    case VIRTIO_CRYPTO_HASH:
    case VIRTIO_CRYPTO_MAC:
    case VIRTIO_CRYPTO_AEAD_ENCRYPT:
    case VIRTIO_CRYPTO_AEAD_DECRYPT:
    default:
        error_report("virtio-crypto unsupported dataq opcode: %u",
                     opcode);
        virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
        virtio_crypto_free_request(request);
    }

    return 0;
}
static CryptoDevBackendSymOpInfo *
virtio_crypto_sym_op_helper(VirtIODevice *vdev,
           struct virtio_crypto_cipher_para *cipher_para,
           struct virtio_crypto_alg_chain_data_para *alg_chain_para,
           struct iovec *iov, unsigned int out_num)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    CryptoDevBackendSymOpInfo *op_info;
    uint32_t src_len = 0, dst_len = 0;
    uint32_t iv_len = 0;
    uint32_t aad_len = 0, hash_result_len = 0;
    uint32_t hash_start_src_offset = 0, len_to_hash = 0;
    uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;

    uint64_t max_len, curr_size = 0;
    size_t s;

    /* Plain cipher */
    if (cipher_para) {
        iv_len = ldl_le_p(&cipher_para->iv_len);
        src_len = ldl_le_p(&cipher_para->src_data_len);
        dst_len = ldl_le_p(&cipher_para->dst_data_len);
    } else if (alg_chain_para) { /* Algorithm chain */
        iv_len = ldl_le_p(&alg_chain_para->iv_len);
        src_len = ldl_le_p(&alg_chain_para->src_data_len);
        dst_len = ldl_le_p(&alg_chain_para->dst_data_len);

        aad_len = ldl_le_p(&alg_chain_para->aad_len);
        hash_result_len = ldl_le_p(&alg_chain_para->hash_result_len);
        hash_start_src_offset = ldl_le_p(
                         &alg_chain_para->hash_start_src_offset);
        cipher_start_src_offset = ldl_le_p(
                         &alg_chain_para->cipher_start_src_offset);
        len_to_cipher = ldl_le_p(&alg_chain_para->len_to_cipher);
        len_to_hash = ldl_le_p(&alg_chain_para->len_to_hash);
    } else {
        return NULL;
    }

    max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
    if (unlikely(max_len > vcrypto->conf.max_size)) {
        virtio_error(vdev, "virtio-crypto too big length");
        return NULL;
    }

    op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
    op_info->iv_len = iv_len;
    op_info->src_len = src_len;
    op_info->dst_len = dst_len;
    op_info->aad_len = aad_len;
    op_info->digest_result_len = hash_result_len;
    op_info->hash_start_src_offset = hash_start_src_offset;
    op_info->len_to_hash = len_to_hash;
    op_info->cipher_start_src_offset = cipher_start_src_offset;
    op_info->len_to_cipher = len_to_cipher;

    /* Handle the initialization vector */
    if (op_info->iv_len > 0) {
        DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
        op_info->iv = op_info->data + curr_size;

        s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
        if (unlikely(s != op_info->iv_len)) {
            virtio_error(vdev, "virtio-crypto iv incorrect");
            goto err;
        }
        iov_discard_front(&iov, &out_num, op_info->iv_len);
        curr_size += op_info->iv_len;
    }

    /* Handle additional authentication data if it exists */
    if (op_info->aad_len > 0) {
        DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
        op_info->aad_data = op_info->data + curr_size;

        s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
        if (unlikely(s != op_info->aad_len)) {
            virtio_error(vdev, "virtio-crypto additional auth data incorrect");
            goto err;
        }
        iov_discard_front(&iov, &out_num, op_info->aad_len);

        curr_size += op_info->aad_len;
    }

    /* Handle the source data */
    if (op_info->src_len > 0) {
        DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
        op_info->src = op_info->data + curr_size;

        s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
        if (unlikely(s != op_info->src_len)) {
            virtio_error(vdev, "virtio-crypto source data incorrect");
            goto err;
        }
        iov_discard_front(&iov, &out_num, op_info->src_len);

        curr_size += op_info->src_len;
    }

    /* Handle the destination data */
    op_info->dst = op_info->data + curr_size;
    curr_size += op_info->dst_len;
    DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);

    /* Handle the hash digest result */
    if (hash_result_len > 0) {
        DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
        op_info->digest_result = op_info->data + curr_size;
    }

    return op_info;

err:
    g_free(op_info);
    return NULL;
}
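/*
 * A hedged sketch (hypothetical struct and sizes, not the QEMU types) of the
 * single-allocation layout built by virtio_crypto_sym_op_helper above: one
 * flexible-array buffer holds iv | aad | src | dst | digest back to back,
 * and each member pointer is carved out at a running offset, mirroring the
 * curr_size bookkeeping in the original.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct sym_op {
    uint8_t *iv, *aad, *src, *dst, *digest;
    uint8_t data[];             /* flexible array member: one allocation */
};

int main(void)
{
    uint32_t iv_len = 16, aad_len = 20, src_len = 64, dst_len = 64,
             digest_len = 32;
    uint64_t total = (uint64_t)iv_len + aad_len + src_len + dst_len +
                     digest_len;
    struct sym_op *op = calloc(1, sizeof(*op) + total);
    uint64_t off = 0;

    op->iv = op->data + off;     off += iv_len;
    op->aad = op->data + off;    off += aad_len;
    op->src = op->data + off;    off += src_len;
    op->dst = op->data + off;    off += dst_len;
    op->digest = op->data + off; off += digest_len;

    /* The regions are disjoint and the offsets sum to the allocation size. */
    assert(off == total);
    assert(op->digest == op->data + total - digest_len);
    free(op);
    return 0;
}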
static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    struct virtio_crypto_op_ctrl_req ctrl;
    VirtQueueElement *elem;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t queue_id;
    uint32_t opcode;
    struct virtio_crypto_session_input input;
    int64_t session_id;
    uint8_t status;
    size_t s;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (elem->out_num < 1 || elem->in_num < 1) {
            virtio_error(vdev, "virtio-crypto ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        out_num = elem->out_num;
        out_iov = elem->out_sg;
        in_num = elem->in_num;
        in_iov = elem->in_sg;
        if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl))
                    != sizeof(ctrl))) {
            virtio_error(vdev, "virtio-crypto request ctrl_hdr too short");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }
        iov_discard_front(&out_iov, &out_num, sizeof(ctrl));
        opcode = ldl_le_p(&ctrl.header.opcode);
        queue_id = ldl_le_p(&ctrl.header.queue_id);

        switch (opcode) {
        case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
            memset(&input, 0, sizeof(input));
            session_id = virtio_crypto_create_sym_session(vcrypto,
                             &ctrl.u.sym_create_session,
                             queue_id, opcode,
                             out_iov, out_num);
            /* Serious errors, need to reset virtio crypto device */
            if (session_id == -EFAULT) {
                virtqueue_detach_element(vq, elem, 0);
                break;
            } else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            } else if (session_id == -VIRTIO_CRYPTO_ERR) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_ERR);
            } else {
                /* Set the session id */
                stq_le_p(&input.session_id, session_id);
                stl_le_p(&input.status, VIRTIO_CRYPTO_OK);
            }

            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);
            break;
        case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
        case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
        case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
        case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
            status = virtio_crypto_handle_close_session(vcrypto,
                   &ctrl.u.destroy_session, queue_id);
            /* The status only occupies one byte, so we can use it directly */
            s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status));
            if (unlikely(s != sizeof(status))) {
                virtio_error(vdev, "virtio-crypto status incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(status));
            virtio_notify(vdev, vq);
            break;
        case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
        case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
        case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
        default:
            error_report("virtio-crypto unsupported ctrl opcode: %d", opcode);
            memset(&input, 0, sizeof(input));
            stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }

            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);
            break;
        } /* end switch case */

        g_free(elem);
    } /* end for loop */
}
static uint64_t ppc440_pcix_reg_read4(void *opaque, hwaddr addr,
                                      unsigned size)
{
    struct PPC440PCIXState *s = opaque;
    uint32_t val;

    switch (addr) {
    case PCI_VENDOR_ID ... PCI_MAX_LAT:
        val = ldl_le_p(s->dev->config + addr);
        break;

    case PCIX0_POM0LAL:
        val = s->pom[0].la;
        break;
    case PCIX0_POM0LAH:
        val = s->pom[0].la >> 32;
        break;
    case PCIX0_POM0SA:
        val = s->pom[0].sa;
        break;
    case PCIX0_POM0PCIAL:
        val = s->pom[0].pcia;
        break;
    case PCIX0_POM0PCIAH:
        val = s->pom[0].pcia >> 32;
        break;
    case PCIX0_POM1LAL:
        val = s->pom[1].la;
        break;
    case PCIX0_POM1LAH:
        val = s->pom[1].la >> 32;
        break;
    case PCIX0_POM1SA:
        val = s->pom[1].sa;
        break;
    case PCIX0_POM1PCIAL:
        val = s->pom[1].pcia;
        break;
    case PCIX0_POM1PCIAH:
        val = s->pom[1].pcia >> 32;
        break;
    case PCIX0_POM2SA:
        val = s->pom[2].sa;
        break;

    case PCIX0_PIM0SAL:
        val = s->pim[0].sa;
        break;
    case PCIX0_PIM0LAL:
        val = s->pim[0].la;
        break;
    case PCIX0_PIM0LAH:
        val = s->pim[0].la >> 32;
        break;
    case PCIX0_PIM1SA:
        val = s->pim[1].sa;
        break;
    case PCIX0_PIM1LAL:
        val = s->pim[1].la;
        break;
    case PCIX0_PIM1LAH:
        val = s->pim[1].la >> 32;
        break;
    case PCIX0_PIM2SAL:
        val = s->pim[2].sa;
        break;
    case PCIX0_PIM2LAL:
        val = s->pim[2].la;
        break;
    case PCIX0_PIM2LAH:
        val = s->pim[2].la >> 32;
        break;

    case PCIX0_STS:
        val = s->sts;
        break;

    case PCIX0_PIM0SAH:
        val = s->pim[0].sa >> 32;
        break;
    case PCIX0_PIM2SAH:
        val = s->pim[2].sa >> 32;
        break;

    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: invalid PCI internal register 0x%" HWADDR_PRIx "\n",
                      __func__, addr);
        val = 0;
    }

    trace_ppc440_pcix_reg_read(addr, val);
    return val;
}