/* Accessors for the guest-physical vring layout. */

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}
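/*
 * Minimal usage sketch (hypothetical, not part of this file): draining
 * the avail ring with the accessors above. Assumes a last_avail_idx
 * field tracking how far the device has consumed the ring, as in
 * QEMU's virtqueue code.
 */
static int example_pop_avail_head(VirtQueue *vq, unsigned int *head)
{
    /* The guest-visible avail index only ever grows; when it matches
     * our private counter there is nothing left to consume. */
    if (vring_avail_idx(vq) == vq->last_avail_idx) {
        return 0; /* ring is empty */
    }
    /* Fetch the descriptor head the guest published at our position,
     * wrapping the free-running index into the ring. */
    *head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
    vq->last_avail_idx++;
    return 1;
}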
void helper_rsm(CPUX86State *env)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0));

    env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(cs->as, sm_state + 0x7e70);
    env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(cs->as, sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88);
    env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84);

    env->tr.selector = lduw_phys(cs->as, sm_state + 0x7e90);
    env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98);
    env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(cs->as, sm_state + 0x7e92) & 0xf0ff) << 8;

    env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8);
    env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0);
    env->regs[R_EDX] = ldq_phys(cs->as, sm_state + 0x7fe8);
    env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0);
    env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8);
    env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0);
    env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8);
    env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8);
    }
    env->eip = ldq_phys(cs->as, sm_state + 0x7f78);
    cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68);
    env->dr[7] = ldl_phys(cs->as, sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(cs->as, sm_state + offset),
                               ldq_phys(cs->as, sm_state + offset + 8),
                               ldl_phys(cs->as, sm_state + offset + 4),
                               (lduw_phys(cs->as,
                                          sm_state + offset + 2) & 0xf0ff) << 8);
    }

    val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7ff8));
    cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(cs->as, sm_state + 0x7ff0);
    env->regs[R_EDI] = ldl_phys(cs->as, sm_state + 0x7fec);
    env->regs[R_ESI] = ldl_phys(cs->as, sm_state + 0x7fe8);
    env->regs[R_EBP] = ldl_phys(cs->as, sm_state + 0x7fe4);
    env->regs[R_ESP] = ldl_phys(cs->as, sm_state + 0x7fe0);
    env->regs[R_EBX] = ldl_phys(cs->as, sm_state + 0x7fdc);
    env->regs[R_EDX] = ldl_phys(cs->as, sm_state + 0x7fd8);
    env->regs[R_ECX] = ldl_phys(cs->as, sm_state + 0x7fd4);
    env->regs[R_EAX] = ldl_phys(cs->as, sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(cs->as, sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(cs->as, sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(cs->as, sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(cs->as, sm_state + 0x7f64);
    env->tr.limit = ldl_phys(cs->as, sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(cs->as, sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(cs->as, sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(cs->as, sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(cs->as, sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(cs->as, sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7f70);

    env->idt.base = ldl_phys(cs->as, sm_state + 0x7f58);
    env->idt.limit = ldl_phys(cs->as, sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(cs->as,
                                        sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(cs->as, sm_state + offset + 8),
                               ldl_phys(cs->as, sm_state + offset + 4),
                               (ldl_phys(cs->as,
                                         sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f14));

    val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(cs->as, sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}
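/*
 * Hypothetical helper, for illustration only: the segment access-rights
 * words restored above were written to the save area by the SMM entry
 * path shifted right by 8, so the round trip keeps only the 0xf0ff
 * bits, i.e. the type/S/DPL/P byte plus the AVL/L/DB/G nibble of the
 * cached segment flags.
 */
static inline uint32_t smm_flags_from_saved_ar(uint16_t saved_ar)
{
    /* mirrors the "(... & 0xf0ff) << 8" pattern used in helper_rsm() */
    return ((uint32_t)saved_ar & 0xf0ff) << 8;
}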
/* S390 hardware initialisation */
static void s390_init(ram_addr_t ram_size,
                      const char *boot_device,
                      const char *kernel_filename,
                      const char *kernel_cmdline,
                      const char *initrd_filename,
                      const char *cpu_model)
{
    CPUState *env = NULL;
    ram_addr_t ram_addr;
    ram_addr_t kernel_size = 0;
    ram_addr_t initrd_offset;
    ram_addr_t initrd_size = 0;
    int i;

    /* XXX we only work on KVM for now */
    if (!kvm_enabled()) {
        fprintf(stderr, "The S390 target only works with KVM enabled\n");
        exit(1);
    }

    /* get a BUS */
    s390_bus = s390_virtio_bus_init(&ram_size);

    /* allocate RAM */
    ram_addr = qemu_ram_alloc(NULL, "s390.ram", ram_size);
    cpu_register_physical_memory(0, ram_size, ram_addr);

    /* init CPUs */
    if (cpu_model == NULL) {
        cpu_model = "host";
    }

    ipi_states = qemu_malloc(sizeof(CPUState *) * smp_cpus);

    for (i = 0; i < smp_cpus; i++) {
        CPUState *tmp_env;

        tmp_env = cpu_init(cpu_model);
        if (!env) {
            env = tmp_env;
        }
        ipi_states[i] = tmp_env;
        tmp_env->halted = 1;
        tmp_env->exception_index = EXCP_HLT;
    }

    env->halted = 0;
    env->exception_index = 0;

    if (kernel_filename) {
        kernel_size = load_image(kernel_filename, qemu_get_ram_ptr(0));

        if (lduw_phys(KERN_IMAGE_START) != 0x0dd0) {
            fprintf(stderr, "Specified image is not an s390 boot image\n");
            exit(1);
        }

        env->psw.addr = KERN_IMAGE_START;
        env->psw.mask = 0x0000000180000000ULL;
    } else {
        ram_addr_t bios_size = 0;
        char *bios_filename;

        /* Load zipl bootloader */
        if (bios_name == NULL) {
            bios_name = ZIPL_FILENAME;
        }

        bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
        bios_size = load_image(bios_filename, qemu_get_ram_ptr(ZIPL_LOAD_ADDR));
        qemu_free(bios_filename);

        if ((long)bios_size < 0) {
            hw_error("could not load bootloader '%s'\n", bios_name);
        }

        if (bios_size > 4096) {
            hw_error("stage1 bootloader is > 4k\n");
        }

        env->psw.addr = ZIPL_START;
        env->psw.mask = 0x0000000180000000ULL;
    }

    if (initrd_filename) {
        /* Place the initrd above the kernel, rounded up in 1 MB steps. */
        initrd_offset = INITRD_START;
        while (kernel_size + 0x100000 > initrd_offset) {
            initrd_offset += 0x100000;
        }
        initrd_size = load_image(initrd_filename,
                                 qemu_get_ram_ptr(initrd_offset));

        stq_phys(INITRD_PARM_START, initrd_offset);
        stq_phys(INITRD_PARM_SIZE, initrd_size);
    }

    if (kernel_cmdline) {
        cpu_physical_memory_rw(KERN_PARM_AREA, (uint8_t *)kernel_cmdline,
                               strlen(kernel_cmdline), 1);
    }

    /* Create VirtIO network adapters */
    for (i = 0; i < nb_nics; i++) {
        NICInfo *nd = &nd_table[i];
        DeviceState *dev;

        if (!nd->model) {
            nd->model = qemu_strdup("virtio");
        }

        if (strcmp(nd->model, "virtio")) {
            fprintf(stderr, "S390 only supports VirtIO nics\n");
            exit(1);
        }

        dev = qdev_create((BusState *)s390_bus, "virtio-net-s390");
        qdev_set_nic_properties(dev, nd);
        qdev_init_nofail(dev);
    }

    /* Create VirtIO disk drives */
    for (i = 0; i < MAX_BLK_DEVS; i++) {
        DriveInfo *dinfo;
        DeviceState *dev;

        dinfo = drive_get(IF_IDE, 0, i);
        if (!dinfo) {
            continue;
        }

        dev = qdev_create((BusState *)s390_bus, "virtio-blk-s390");
        qdev_prop_set_drive_nofail(dev, "drive", dinfo->bdrv);
        qdev_init_nofail(dev);
    }
}
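/*
 * Hypothetical sketch: any further virtio device would follow the same
 * pattern as the net and block devices above -- create a qdev on the
 * s390 virtio bus, set its properties, then realize it. The device
 * name below is an assumption, not taken from this file.
 */
static void example_create_virtio_serial(void)
{
    DeviceState *dev;

    dev = qdev_create((BusState *)s390_bus, "virtio-serial-s390");
    qdev_init_nofail(dev);
}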
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VqInfoBlock info;
    uint8_t status;
    VirtioFeatDesc features;
    void *config;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    bool check_len;
    int len;
    hwaddr hw_len;

    if (!dev) {
        return -EINVAL;
    }
    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        if (check_len) {
            if (ccw.count != sizeof(info)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(info)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            info.queue = ldq_phys(ccw.cda);
            info.align = ldl_phys(ccw.cda + sizeof(info.queue));
            info.index = lduw_phys(ccw.cda + sizeof(info.queue)
                                   + sizeof(info.align));
            info.num = lduw_phys(ccw.cda + sizeof(info.queue)
                                 + sizeof(info.align)
                                 + sizeof(info.index));
            ret = virtio_ccw_set_vqs(sch, info.queue, info.align, info.index,
                                     info.num);
            sch->curr_status.scsw.count = 0;
        }
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_reset(dev->vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                features.features = dev->host_features[features.index];
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            stl_le_phys(ccw.cda, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            features.features = ldl_le_phys(ccw.cda);
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                if (dev->vdev->set_features) {
                    dev->vdev->set_features(dev->vdev, features.features);
                }
                dev->vdev->guest_features = features.features;
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    fprintf(stderr, "Guest bug: features[%i]=%x (expected 0)\n",
                            features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            dev->vdev->get_config(dev->vdev, dev->vdev->config);
            /* XXX config space endianness */
            cpu_physical_memory_write(ccw.cda, dev->vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        hw_len = len;
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            config = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
            if (!config) {
                ret = -EFAULT;
            } else {
                len = hw_len;
                /* XXX config space endianness */
                memcpy(dev->vdev->config, config, len);
                cpu_physical_memory_unmap(config, hw_len, 0, hw_len);
                if (dev->vdev->set_config) {
                    dev->vdev->set_config(dev->vdev, dev->vdev->config);
                }
                sch->curr_status.scsw.count = ccw.count - len;
                ret = 0;
            }
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            status = ldub_phys(ccw.cda);
            virtio_set_status(dev->vdev, status);
            if (dev->vdev->status == 0) {
                virtio_reset(dev->vdev);
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(status);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            dev->indicators = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            dev->indicators2 = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            vq_config.index = lduw_phys(ccw.cda);
            vq_config.num_max = virtio_queue_get_num(dev->vdev,
                                                     vq_config.index);
            stw_phys(ccw.cda + sizeof(vq_config.index), vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}
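/*
 * For reference, a sketch of the control block layouts the guest passes
 * through ccw.cda above. The field order matches the ldq/ldl/lduw
 * offsets used in CCW_CMD_SET_VQ and CCW_CMD_READ_VQ_CONF; the real
 * definitions live in the virtio-ccw headers, so treat this as an
 * illustration rather than the canonical declarations.
 */
struct ExampleVqInfoBlock {      /* CCW_CMD_SET_VQ payload */
    uint64_t queue;              /* guest address of the vring */
    uint32_t align;              /* ring alignment */
    uint16_t index;              /* virtqueue index */
    uint16_t num;                /* number of ring entries */
} QEMU_PACKED;

struct ExampleVqConfigBlock {    /* CCW_CMD_READ_VQ_CONF payload */
    uint16_t index;              /* virtqueue index (guest-written) */
    uint16_t num_max;            /* max ring size (host-written) */
} QEMU_PACKED;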