/*
 * Notify the guest about a virtio event on behalf of device @d for @vector.
 * Sets the corresponding bit in the guest-registered indicators area and
 * raises a conditional I/O interrupt on the subchannel.  Vectors below
 * VIRTIO_PCI_QUEUE_MAX are queue indicators; any other vector is folded
 * onto bit 0 of the secondary indicators (dev->indicators2).
 */
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    SubchDev *sch = dev->sch;
    uint64_t indicators;

    /* Only 128 vectors are supported; silently ignore anything beyond. */
    if (vector >= 128) {
        return;
    }

    if (vector < VIRTIO_PCI_QUEUE_MAX) {
        /* Queue notification: set bit <vector> in the primary indicators. */
        if (!dev->indicators) {
            /* Guest has not registered an indicators area yet. */
            return;
        }
        indicators = ldq_phys(dev->indicators);
        indicators |= 1ULL << vector;
        stq_phys(dev->indicators, indicators);
    } else {
        /* Configuration change: always signalled via bit 0 of indicators2. */
        if (!dev->indicators2) {
            return;
        }
        vector = 0;
        indicators = ldq_phys(dev->indicators2);
        indicators |= 1ULL << vector;
        stq_phys(dev->indicators2, indicators);
    }
    css_conditional_io_interrupt(sch);
}
/* PAE Paging or IA-32e Paging */
static void walk_pte(MemoryMappingList *list, hwaddr pte_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    int idx;

    /* A page table at this level holds 512 8-byte entries. */
    for (idx = 0; idx < 512; idx++) {
        hwaddr entry_addr = (pte_start_addr + idx * 8) & a20_mask;
        uint64_t entry = ldq_phys(entry_addr);
        hwaddr paddr;
        target_ulong vaddr;

        if (!(entry & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        /* Drop the low flag bits and the NX bit to get the frame address. */
        paddr = (entry & ~0xfff) & ~(0x1ULL << 63);
        if (cpu_physical_memory_is_io(paddr)) {
            /* I/O region */
            continue;
        }

        vaddr = start_line_addr | ((idx & 0x1ff) << 12);
        memory_mapping_list_add_merge_sorted(list, paddr, vaddr, 1 << 12);
    }
}
/* Walk one page directory level; 2 MB large pages are recorded directly,
 * other present entries descend into the pointed-to page table. */
static void walk_pde(MemoryMappingList *list, hwaddr pde_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    int idx;

    for (idx = 0; idx < 512; idx++) {
        hwaddr entry_addr = (pde_start_addr + idx * 8) & a20_mask;
        uint64_t entry = ldq_phys(entry_addr);
        target_ulong line_addr;

        if (!(entry & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((idx & 0x1ff) << 21);
        if (entry & PG_PSE_MASK) {
            /* 2 MB page: record the mapping directly. */
            hwaddr paddr = (entry & ~0x1fffff) & ~(0x1ULL << 63);

            if (!cpu_physical_memory_is_io(paddr)) {
                memory_mapping_list_add_merge_sorted(list, paddr, line_addr,
                                                     1 << 21);
            }
        } else {
            /* Points at a page table: descend one level. */
            hwaddr pte_start_addr = (entry & PLM4_ADDR_MASK) & a20_mask;

            walk_pte(list, pte_start_addr, a20_mask, line_addr);
        }
    }
}
/*
 * Debug translation of an effective address to a real (physical) address
 * using the radix MMU.  Unlike the fault path, this never raises guest
 * exceptions; it returns -1 on any translation failure.  Used by the
 * gdbstub / monitor to peek at guest memory.
 */
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc;
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
    int page_size, fault_cause = 0;
    ppc_v3_pate_t pate;

    /* Handle Real Mode */
    if (msr_dr == 0) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        return eaddr & 0x0FFFFFFFFFFFFFFFULL;
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        return -1;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        /* Under a virtual hypervisor (pseries): ask it for the PATE. */
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->get_pate(cpu->vhyp, &pate);
    } else {
        /* Bare metal (PowerNV): read and validate the partition table. */
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            return -1;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            return -1;
        }
        /* We don't support guest mode yet */
        if (lpid != 0) {
            error_report("PowerNV guest support Unimplemented");
            exit(1);
        }
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        return -1;
    }
    prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte) {
        return -1;
    }

    return raddr & TARGET_PAGE_MASK;
}
/*
 * Alpha store-conditional to a physical address.  Succeeds (returns 1 and
 * performs the store) only if the lock from a prior load-locked still
 * covers @p and memory still holds the value observed at that time.
 * The lock is always cleared, pass or fail.
 */
uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
    uint64_t success = 0;

    if (p == env->lock_addr && ldq_phys(p) == env->lock_value) {
        stq_phys(p, v);
        success = 1;
    }
    env->lock_addr = -1;
    return success;
}
/*
 * Alpha store-conditional to a physical address (address-space aware
 * variant).  The store goes through only if the lock address still matches
 * and memory still holds the value seen at load-locked time.  Any
 * store-conditional clears the lock.
 */
uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    uint64_t success = 0;

    if (p == env->lock_addr && ldq_phys(cs->as, p) == env->lock_value) {
        stq_phys(cs->as, p, v);
        success = 1;
    }
    env->lock_addr = -1;
    return success;
}
/* IA-32e Paging */
static void walk_pml4e(MemoryMappingList *list, hwaddr pml4e_start_addr,
                       int32_t a20_mask)
{
    int idx;

    for (idx = 0; idx < 512; idx++) {
        hwaddr entry_addr = (pml4e_start_addr + idx * 8) & a20_mask;
        uint64_t entry = ldq_phys(entry_addr);

        if (entry & PG_PRESENT_MASK) {
            /* The 0xffff << 48 part sign-extends bit 47 for canonical-form
             * kernel addresses. */
            target_ulong line_addr = ((idx & 0x1ffULL) << 39) |
                                     (0xffffULL << 48);
            hwaddr pdpe_start_addr = (entry & PLM4_ADDR_MASK) & a20_mask;

            walk_pdpe(list, pdpe_start_addr, a20_mask, line_addr);
        }
    }
}
/* PAE Paging */
static void walk_pdpe2(MemoryMappingList *list, hwaddr pdpe_start_addr,
                       int32_t a20_mask)
{
    int idx;

    /* The PAE page-directory-pointer table has exactly 4 entries. */
    for (idx = 0; idx < 4; idx++) {
        hwaddr entry_addr = (pdpe_start_addr + idx * 8) & a20_mask;
        uint64_t entry = ldq_phys(entry_addr);

        if (entry & PG_PRESENT_MASK) {
            target_ulong line_addr = (((unsigned int)idx & 0x3) << 30);
            hwaddr pde_start_addr = (entry & ~0xfff) & a20_mask;

            walk_pde(list, pde_start_addr, a20_mask, line_addr);
        }
    }
}
/*
 * Recursively walk the radix tree for @eaddr starting at the directory at
 * @base_addr, which maps 2**@nls entries.  On success the leaf PTE is
 * returned and *raddr (real address), *psize (page size shift) and
 * *pte_addr (guest address of the PTE) are filled in.  On failure 0 is
 * returned and bits are set in *fault_cause.
 */
static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
                                      uint64_t base_addr, uint64_t nls,
                                      hwaddr *raddr, int *psize,
                                      int *fault_cause, hwaddr *pte_addr)
{
    CPUState *cs = CPU(cpu);
    uint64_t index, pde;

    if (nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 0;
    }

    /* Read page <directory/table> entry from guest address space */
    index = eaddr >> (*psize - nls);    /* Shift */
    index &= ((1UL << nls) - 1);        /* Mask */
    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 0;
    }

    /* Remaining page size shrinks by the bits consumed at this level. */
    *psize -= nls;

    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
    if (pde & R_PTE_LEAF) {
        uint64_t rpn = pde & R_PTE_RPN;
        uint64_t mask = (1UL << *psize) - 1;

        /* Or high bits of rpn and low bits to ea to form whole real addr */
        *raddr = (rpn & ~mask) | (eaddr & mask);
        *pte_addr = base_addr + (index * sizeof(pde));
        return pde;
    }

    /* Next Level of Radix Tree */
    return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
                                 raddr, psize, fault_cause, pte_addr);
}
/* Read the guest-physical 'addr' field of the i-th descriptor of the ring
 * whose descriptor table starts at @desc_pa. */
static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
    return ldq_phys(desc_pa + sizeof(VRingDesc) * i +
                    offsetof(VRingDesc, addr));
}
/* Read the guest-physical 'addr' field of the i-th descriptor of the ring
 * whose descriptor table starts at @desc_pa. */
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    return ldq_phys(desc_pa + sizeof(VRingDesc) * i +
                    offsetof(VRingDesc, addr));
}
/*
 * RSM: return from System Management Mode.  Restores the CPU state that was
 * saved in the SMRAM state save area (at smbase + 0x8000) when the SMI was
 * taken, then clears the SMM flag.  The 0x7xxx constants are the
 * architectural state-save-map offsets; the layout differs between the
 * 64-bit (TARGET_X86_64) and legacy 32-bit save images.
 */
void helper_rsm(CPUX86State *env)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0));

    /* Descriptor tables and system segment registers. */
    env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(cs->as, sm_state + 0x7e70);
    env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(cs->as, sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88);
    env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84);

    env->tr.selector = lduw_phys(cs->as, sm_state + 0x7e90);
    env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98);
    env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(cs->as, sm_state + 0x7e92) & 0xf0ff) << 8;

    /* General-purpose registers. */
    env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8);
    env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0);
    env->regs[R_EDX] = ldq_phys(cs->as, sm_state + 0x7fe8);
    env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0);
    env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8);
    env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0);
    env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8);
    env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0);
    /* r8..r15 are stored downwards from 0x7ff8 - 8*8. */
    for (i = 8; i < 16; i++) {
        env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8);
    }
    env->eip = ldq_phys(cs->as, sm_state + 0x7f78);
    cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68);
    env->dr[7] = ldl_phys(cs->as, sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58));

    /* Segment registers: selector/base/limit/flags, one 16-byte slot each. */
    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(cs->as, sm_state + offset),
                               ldq_phys(cs->as, sm_state + offset + 8),
                               ldl_phys(cs->as, sm_state + offset + 4),
                               (lduw_phys(cs->as,
                                          sm_state + offset + 2) & 0xf0ff) << 8);
    }

    val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* This save-state revision supports SMBASE relocation. */
        env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7ff8));
    cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(cs->as, sm_state + 0x7ff0);

    /* General-purpose registers (32-bit save image). */
    env->regs[R_EDI] = ldl_phys(cs->as, sm_state + 0x7fec);
    env->regs[R_ESI] = ldl_phys(cs->as, sm_state + 0x7fe8);
    env->regs[R_EBP] = ldl_phys(cs->as, sm_state + 0x7fe4);
    env->regs[R_ESP] = ldl_phys(cs->as, sm_state + 0x7fe0);
    env->regs[R_EBX] = ldl_phys(cs->as, sm_state + 0x7fdc);
    env->regs[R_EDX] = ldl_phys(cs->as, sm_state + 0x7fd8);
    env->regs[R_ECX] = ldl_phys(cs->as, sm_state + 0x7fd4);
    env->regs[R_EAX] = ldl_phys(cs->as, sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(cs->as, sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(cs->as, sm_state + 0x7fc8);

    /* System segment registers. */
    env->tr.selector = ldl_phys(cs->as, sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(cs->as, sm_state + 0x7f64);
    env->tr.limit = ldl_phys(cs->as, sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(cs->as, sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(cs->as, sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(cs->as, sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(cs->as, sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(cs->as, sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7f70);

    env->idt.base = ldl_phys(cs->as, sm_state + 0x7f58);
    env->idt.limit = ldl_phys(cs->as, sm_state + 0x7f54);

    /* Segment registers: first three and last three live in separate
     * regions of the save map; selectors sit in their own table. */
    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(cs->as,
                                        sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(cs->as, sm_state + offset + 8),
                               ldl_phys(cs->as, sm_state + offset + 4),
                               (ldl_phys(cs->as,
                                         sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f14));

    val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* This save-state revision supports SMBASE relocation. */
        env->smbase = ldl_phys(cs->as, sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}
/*
 * Alpha load-locked from a physical address: remember the address and the
 * value read so a later store-conditional can detect intervening writes.
 */
uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
{
    uint64_t val = ldq_phys(p);

    env->lock_addr = p;
    env->lock_value = val;
    return val;
}
/* Plain 8-byte load from guest physical address @p (no locking). */
uint64_t helper_ldq_phys(uint64_t p)
{
    return ldq_phys(p);
}
/*
 * Alpha load-locked from a physical address (address-space aware variant):
 * record the address and loaded value for a later store-conditional.
 */
uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    uint64_t val = ldq_phys(cs->as, p);

    env->lock_addr = p;
    env->lock_value = val;
    return val;
}
/* Plain 8-byte load from guest physical address @p through the CPU's
 * address space (no locking). */
uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));

    return ldq_phys(cs->as, p);
}
/* Plain 8-byte load from guest physical address @p through the CPU's
 * address space (no locking). */
uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p)
{
    CPUState *cs = ENV_GET_CPU(env);

    return ldq_phys(cs->as, p);
}
/*
 * Alpha load-locked from a physical address: record the address and the
 * loaded value so a subsequent store-conditional can verify them.
 */
uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t val = ldq_phys(cs->as, p);

    env->lock_addr = p;
    env->lock_value = val;
    return val;
}
/*
 * Handle a TLB miss / MMU fault for @eaddr under the radix MMU.
 * @rwx is 0 for data read, 1 for data write, 2 for instruction fetch.
 * On success the translation is installed with tlb_set_page() and 0 is
 * returned; on failure the appropriate interrupt (segment or storage) is
 * raised on the CPU and 1 is returned.
 */
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                 int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte;
    int page_size, prot, fault_cause = 0;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
    assert(!msr_hv); /* For now there is no Radix PowerNV Support */
    assert(cpu->vhyp);
    assert(ppc64_use_proc_tbl(cpu));

    /* Real Mode Access */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        /* No matching quadrant: segment interrupt. */
        ppc_radix64_raise_segi(cpu, rwx, eaddr);
        return 1;
    }

    /* Get Process Table */
    patbe = vhc->get_patbe(cpu->vhyp);

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
        return 1;
    }
    prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset);

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
        /* Couldn't get pte or access denied due to protection */
        ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
        return 1;
    }

    /* Update Reference and Change Bits */
    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 0;
}
/*
 * Interpret a single channel command word (CCW) for a virtio-ccw device.
 * Dispatches on ccw.cmd_code, validates the data count against the command
 * payload size, transfers data to/from the guest address in ccw.cda, and
 * updates the residual count in the subchannel status.  Returns 0 on
 * success or a negative errno-style value on failure.
 */
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VqInfoBlock info;
    uint8_t status;
    VirtioFeatDesc features;
    void *config;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    bool check_len;
    int len;
    hwaddr hw_len;

    if (!dev) {
        return -EINVAL;
    }
    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    /* With suppress-length-indication set and no data chaining, a
     * mismatched count is tolerated (only too-short buffers fail). */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        if (check_len) {
            if (ccw.count != sizeof(info)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(info)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Read the VqInfoBlock fields from guest memory. */
            info.queue = ldq_phys(ccw.cda);
            info.align = ldl_phys(ccw.cda + sizeof(info.queue));
            info.index = lduw_phys(ccw.cda + sizeof(info.queue)
                                   + sizeof(info.align));
            info.num = lduw_phys(ccw.cda + sizeof(info.queue)
                                 + sizeof(info.align)
                                 + sizeof(info.index));
            ret = virtio_ccw_set_vqs(sch, info.queue, info.align, info.index,
                                     info.num);
            sch->curr_status.scsw.count = 0;
        }
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_reset(dev->vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                features.features = dev->host_features[features.index];
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            stl_le_phys(ccw.cda, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            features.features = ldl_le_phys(ccw.cda);
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                if (dev->vdev->set_features) {
                    dev->vdev->set_features(dev->vdev, features.features);
                }
                dev->vdev->guest_features = features.features;
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    fprintf(stderr, "Guest bug: features[%i]=%x (expected 0)\n",
                            features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            dev->vdev->get_config(dev->vdev, dev->vdev->config);
            /* XXX config space endianness */
            cpu_physical_memory_write(ccw.cda, dev->vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        hw_len = len;
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Map the guest buffer and copy it into the config space. */
            config = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
            if (!config) {
                ret = -EFAULT;
            } else {
                len = hw_len;
                /* XXX config space endianness */
                memcpy(dev->vdev->config, config, len);
                cpu_physical_memory_unmap(config, hw_len, 0, hw_len);
                if (dev->vdev->set_config) {
                    dev->vdev->set_config(dev->vdev, dev->vdev->config);
                }
                sch->curr_status.scsw.count = ccw.count - len;
                ret = 0;
            }
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            status = ldub_phys(ccw.cda);
            virtio_set_status(dev->vdev, status);
            /* Writing status 0 resets the device. */
            if (dev->vdev->status == 0) {
                virtio_reset(dev->vdev);
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(status);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            /* Remember the guest address of the classic indicators. */
            dev->indicators = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            /* Remember the guest address of the config-change indicators. */
            dev->indicators2 = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Return the maximum queue size for the requested queue. */
            vq_config.index = lduw_phys(ccw.cda);
            vq_config.num_max = virtio_queue_get_num(dev->vdev,
                                                     vq_config.index);
            stw_phys(ccw.cda + sizeof(vq_config.index), vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}