/* Send an option request. * * The request is for option @opt, with @data containing @len bytes of * additional payload for the request (@len may be -1 to treat @data as * a C string; and @data may be NULL if @len is 0). * Return 0 if successful, -1 with errp set if it is impossible to * continue. */ static int nbd_send_option_request(QIOChannel *ioc, uint32_t opt, uint32_t len, const char *data, Error **errp) { nbd_option req; QEMU_BUILD_BUG_ON(sizeof(req) != 16); if (len == -1) { req.length = len = strlen(data); } trace_nbd_send_option_request(opt, nbd_opt_lookup(opt), len); stq_be_p(&req.magic, NBD_OPTS_MAGIC); stl_be_p(&req.option, opt); stl_be_p(&req.length, len); if (nbd_write(ioc, &req, sizeof(req), errp) < 0) { error_prepend(errp, "Failed to send option request header"); return -1; } if (len && nbd_write(ioc, (char *) data, len, errp) < 0) { error_prepend(errp, "Failed to send option request data"); return -1; } return 0; }
/*
 * Handle a SCSI PERSISTENT RESERVE IN command via libmpathpersist.
 *
 * @fd:    device file descriptor passed to mpath_persistent_reserve_in()
 * @cdb:   the SCSI CDB; cdb[1] holds the service action code
 * @sense: buffer that receives sense data on failure
 * @data:  output buffer for the parameter data (big-endian wire format)
 * @sz:    size of @data in bytes
 *
 * Returns CHECK_CONDITION (with @sense filled in) for unsupported
 * service actions, otherwise the status reconstructed from the
 * multipath result by mpath_reconstruct_sense().
 */
static int multipath_pr_in(int fd, const uint8_t *cdb, uint8_t *sense,
                           uint8_t *data, int sz)
{
    int rq_servact = cdb[1];
    struct prin_resp resp;
    size_t written;
    int r;

    /* Only the service actions whose output we can serialize below are
     * forwarded to libmpathpersist. */
    switch (rq_servact) {
    case MPATH_PRIN_RKEY_SA:
    case MPATH_PRIN_RRES_SA:
    case MPATH_PRIN_RCAP_SA:
        break;
    case MPATH_PRIN_RFSTAT_SA:
        /* Nobody implements it anyway, so bail out. */
    default:
        /* Cannot parse any other output. */
        scsi_build_sense(sense, SENSE_CODE(INVALID_FIELD));
        return CHECK_CONDITION;
    }

    r = mpath_persistent_reserve_in(fd, rq_servact, &resp, noisy, verbose);
    if (r == MPATH_PR_SUCCESS) {
        /* Re-encode the host-order libmpathpersist response into the
         * big-endian parameter data the initiator expects. */
        switch (rq_servact) {
        case MPATH_PRIN_RKEY_SA:
        case MPATH_PRIN_RRES_SA:
            {
                struct prin_readdescr *out = &resp.prin_descriptor.prin_readkeys;
                /* 8-byte fixed header (generation + additional length)
                 * must fit before any key list data. */
                assert(sz >= 8);
                written = MIN(out->additional_length + 8, sz);
                stl_be_p(&data[0], out->prgeneration);
                stl_be_p(&data[4], out->additional_length);
                /* Copy only as much of the key list as fits in @data. */
                memcpy(&data[8], out->key_list, written - 8);
                break;
            }
        case MPATH_PRIN_RCAP_SA:
            {
                struct prin_capdescr *out = &resp.prin_descriptor.prin_readcap;
                /* REPORT CAPABILITIES response is exactly 6 bytes. */
                assert(sz >= 6);
                written = 6;
                stw_be_p(&data[0], out->length);
                data[2] = out->flags[0];
                data[3] = out->flags[1];
                stw_be_p(&data[4], out->pr_type_mask);
                break;
            }
        default:
            scsi_build_sense(sense, SENSE_CODE(INVALID_OPCODE));
            return CHECK_CONDITION;
        }
        assert(written <= sz);
        /* Zero-fill the remainder so no stale data leaks to the guest. */
        memset(data + written, 0, sz - written);
    }

    return mpath_reconstruct_sense(fd, r, sense);
}
/*
 * Serialize @request into the fixed-size NBD request header and send
 * it over @ioc.  All multi-byte fields go out in network (big-endian)
 * byte order.  Returns the result of nbd_write() (0 on success,
 * negative on error; no Error object is propagated).
 */
int nbd_send_request(QIOChannel *ioc, NBDRequest *request)
{
    uint8_t wire[NBD_REQUEST_SIZE];
    uint8_t *p = wire;

    trace_nbd_send_request(request->from, request->len, request->handle,
                           request->flags, request->type,
                           nbd_cmd_lookup(request->type));

    /* Walk the buffer field by field: magic, flags, type, handle,
     * offset, length -- 4+2+2+8+8+4 = 28 bytes total. */
    stl_be_p(p, NBD_REQUEST_MAGIC);
    p += 4;
    stw_be_p(p, request->flags);
    p += 2;
    stw_be_p(p, request->type);
    p += 2;
    stq_be_p(p, request->handle);
    p += 8;
    stq_be_p(p, request->from);
    p += 8;
    stl_be_p(p, request->len);

    return nbd_write(ioc, wire, sizeof(wire), NULL);
}
/*
 * Record a newly logged AER error in the device's AER capability:
 * update the First Error Pointer, and fill (or clear) the Header Log
 * and TLP Prefix Log registers from @err.
 *
 * @err->status must have exactly one bit set (checked below), since
 * the First Error Pointer encodes a single bit position.
 */
static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint8_t first_bit = ctz32(err->status);
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int i;

    assert(err->status);
    /* power-of-two check: exactly one status bit may be set */
    assert(!(err->status & (err->status - 1)));

    /* Replace the First Error Pointer and clear the TLP-prefix-present
     * flag; the latter is re-set below only if a prefix is logged. */
    errcap &= ~(PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);
    errcap |= PCI_ERR_CAP_FEP(first_bit);

    if (err->flags & PCIE_AER_ERR_HEADER_VALID) {
        for (i = 0; i < ARRAY_SIZE(err->header); ++i) {
            /* 7.10.8 Header Log Register */
            uint8_t *header_log =
                aer_cap + PCI_ERR_HEADER_LOG + i * sizeof err->header[0];
            stl_be_p(header_log, err->header[i]);
        }
    } else {
        /* A TLP prefix without a valid header makes no sense. */
        assert(!(err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT));
        memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    }

    /* Log TLP prefixes only if present AND the device advertises
     * End-End TLP Prefix support in Device Capabilities 2. */
    if ((err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT) &&
        (pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCAP2) &
         PCI_EXP_DEVCAP2_EETLPP)) {
        for (i = 0; i < ARRAY_SIZE(err->prefix); ++i) {
            /* 7.10.12 tlp prefix log register */
            uint8_t *prefix_log =
                aer_cap + PCI_ERR_TLP_PREFIX_LOG + i * sizeof err->prefix[0];
            stl_be_p(prefix_log, err->prefix[i]);
        }
        errcap |= PCI_ERR_CAP_TLP;
    } else {
        memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0,
               PCI_ERR_TLP_PREFIX_LOG_SIZE);
    }
    /* Commit the updated Advanced Error Capabilities register. */
    pci_set_long(aer_cap + PCI_ERR_CAP, errcap);
}
/* warning: addr must be aligned */
/*
 * Template (expanded via glue/SUFFIX and the ARG1_DECL/TRANSLATE/
 * IS_DIRECT/MAP_RAM/INVALIDATE macros) for a 32-bit store into an
 * address space with an explicit @endian policy.
 *
 * MMIO path: the value is byte-swapped as needed so that
 * memory_region_dispatch_write() receives it in the target's native
 * order, then dispatched to the device.
 * RAM path: the host pointer is obtained and the value stored with
 * the endian-specific stl_*_p helper, followed by dirty-tracking
 * invalidation of the 4 bytes written.
 *
 * *@result (if non-NULL) receives the MemTxResult of the access.
 */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    /* MMIO if the translation covers fewer than 4 bytes or the region
     * is not directly accessible RAM. */
    if (l < 4 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN: target-native order */
            stl_p(ptr, val);
            break;
        }
        /* Mark the written range dirty for migration/TCG. */
        INVALIDATE(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
/*
 * Completion callback for a passthrough read (DATA IN) request.
 *
 * Besides delivering the data to the SCSI layer, this snoops a few
 * command results to keep the emulated device state coherent with the
 * real device: READ CAPACITY (10/16) output updates blocksize/max_lba,
 * and MODE SENSE output is patched to advertise write protection when
 * the backing store is read-only.
 *
 * Runs with the blk's AioContext acquired for the duration.
 */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }
    /* Actual bytes transferred = requested length minus residue. */
    len = r->io_header.dxfer_len - r->io_header.resid;
    DPRINTF("Data ready tag=0x%x len=%d\n", r->req.tag, len);

    r->len = -1;
    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    /* NOTE(review): the snooping below reads r->buf without checking
     * that @len covers the accessed offsets -- assumes the device
     * returned the full response; verify against short transfers. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        /* Set the WP bit in the device-specific parameter byte
         * (offset 2 for MODE SENSE(6), offset 3 for MODE SENSE(10)). */
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else  {
            r->buf[3] |= 0x80;
        }
    }
    /* Clamp the Block Limits VPD page (0xb0) to what the host block
     * layer can actually transfer in one request. */
    if (s->type == TYPE_DISK &&
        r->req.cmd.buf[0] == INQUIRY &&
        r->req.cmd.buf[2] == 0xb0) {
        uint32_t max_transfer =
            blk_get_max_transfer(s->conf.blk) / s->blocksize;

        assert(max_transfer);
        stl_be_p(&r->buf[8], max_transfer);
        /* Also take care of the opt xfer len. */
        stl_be_p(&r->buf[12],
                 MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
    }
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Returns -1 if NBD_OPT_GO proves the export @wantname cannot be * used, 0 if NBD_OPT_GO is unsupported (fall back to NBD_OPT_LIST and * NBD_OPT_EXPORT_NAME in that case), and > 0 if the export is good to * go (with @info populated). */ static int nbd_opt_go(QIOChannel *ioc, const char *wantname, NBDExportInfo *info, Error **errp) { nbd_opt_reply reply; uint32_t len = strlen(wantname); uint16_t type; int error; char *buf; /* The protocol requires that the server send NBD_INFO_EXPORT with * a non-zero flags (at least NBD_FLAG_HAS_FLAGS must be set); so * flags still 0 is a witness of a broken server. */ info->flags = 0; trace_nbd_opt_go_start(wantname); buf = g_malloc(4 + len + 2 + 2 * info->request_sizes + 1); stl_be_p(buf, len); memcpy(buf + 4, wantname, len); /* At most one request, everything else up to server */ stw_be_p(buf + 4 + len, info->request_sizes); if (info->request_sizes) { stw_be_p(buf + 4 + len + 2, NBD_INFO_BLOCK_SIZE); } error = nbd_send_option_request(ioc, NBD_OPT_GO, 4 + len + 2 + 2 * info->request_sizes, buf, errp); g_free(buf); if (error < 0) { return -1; } while (1) { if (nbd_receive_option_reply(ioc, NBD_OPT_GO, &reply, errp) < 0) { return -1; } error = nbd_handle_reply_err(ioc, &reply, errp); if (error <= 0) { return error; } len = reply.length; if (reply.type == NBD_REP_ACK) { /* Server is done sending info and moved into transmission phase, but make sure it sent flags */ if (len) { error_setg(errp, "server sent invalid NBD_REP_ACK"); return -1; } if (!info->flags) { error_setg(errp, "broken server omitted NBD_INFO_EXPORT"); return -1; } trace_nbd_opt_go_success(); return 1; } if (reply.type != NBD_REP_INFO) { error_setg(errp, "unexpected reply type %" PRIx32 " (%s), expected %x", reply.type, nbd_rep_lookup(reply.type), NBD_REP_INFO); nbd_send_opt_abort(ioc); return -1; } if (len < sizeof(type)) { error_setg(errp, "NBD_REP_INFO length %" PRIu32 " is too short", len); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &type, 
sizeof(type), errp) < 0) { error_prepend(errp, "failed to read info type"); nbd_send_opt_abort(ioc); return -1; } len -= sizeof(type); be16_to_cpus(&type); switch (type) { case NBD_INFO_EXPORT: if (len != sizeof(info->size) + sizeof(info->flags)) { error_setg(errp, "remaining export info len %" PRIu32 " is unexpected size", len); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &info->size, sizeof(info->size), errp) < 0) { error_prepend(errp, "failed to read info size"); nbd_send_opt_abort(ioc); return -1; } be64_to_cpus(&info->size); if (nbd_read(ioc, &info->flags, sizeof(info->flags), errp) < 0) { error_prepend(errp, "failed to read info flags"); nbd_send_opt_abort(ioc); return -1; } be16_to_cpus(&info->flags); trace_nbd_receive_negotiate_size_flags(info->size, info->flags); break; case NBD_INFO_BLOCK_SIZE: if (len != sizeof(info->min_block) * 3) { error_setg(errp, "remaining export info len %" PRIu32 " is unexpected size", len); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &info->min_block, sizeof(info->min_block), errp) < 0) { error_prepend(errp, "failed to read info minimum block size"); nbd_send_opt_abort(ioc); return -1; } be32_to_cpus(&info->min_block); if (!is_power_of_2(info->min_block)) { error_setg(errp, "server minimum block size %" PRId32 "is not a power of two", info->min_block); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &info->opt_block, sizeof(info->opt_block), errp) < 0) { error_prepend(errp, "failed to read info preferred block size"); nbd_send_opt_abort(ioc); return -1; } be32_to_cpus(&info->opt_block); if (!is_power_of_2(info->opt_block) || info->opt_block < info->min_block) { error_setg(errp, "server preferred block size %" PRId32 "is not valid", info->opt_block); nbd_send_opt_abort(ioc); return -1; } if (nbd_read(ioc, &info->max_block, sizeof(info->max_block), errp) < 0) { error_prepend(errp, "failed to read info maximum block size"); nbd_send_opt_abort(ioc); return -1; } be32_to_cpus(&info->max_block); 
trace_nbd_opt_go_info_block_size(info->min_block, info->opt_block, info->max_block); break; default: trace_nbd_opt_go_info_unknown(type, nbd_info_lookup(type)); if (nbd_drop(ioc, len, errp) < 0) { error_prepend(errp, "Failed to read info payload"); nbd_send_opt_abort(ioc); return -1; } break; } } }