void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    ORB orig_orb, orb;
    uint64_t addr;
    CPUS390XState *env = &cpu->env;
    uint8_t ar;

    addr = decode_basedisp_s(env, ipb, &ar);
    if (addr & 3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
        return;
    }
    if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return;
    }
    copy_orb_from_guest(&orb, &orig_orb);
    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
        !ioinst_orb_valid(&orb)) {
        s390_program_interrupt(env, PGM_OPERAND, 4, ra);
        return;
    }
    trace_ioinst_sch_id("ssch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (!sch || !css_subch_visible(sch)) {
        setcc(cpu, 3);
        return;
    }
    setcc(cpu, css_do_ssch(sch, &orb));
}
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    int ret = -ENODEV;
    int cc;

    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        program_interrupt(&cpu->env, PGM_OPERAND, 2);
        return;
    }
    trace_ioinst_sch_id("hsch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        ret = css_do_hsch(sch);
    }
    switch (ret) {
    case -ENODEV:
        cc = 3;
        break;
    case -EBUSY:
        cc = 2;
        break;
    case 0:
        cc = 0;
        break;
    default:
        cc = 1;
        break;
    }
    setcc(cpu, cc);
}
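/*
 * The ret -> condition-code translation above recurs in most of these
 * handlers (hsch, xsch, ssch, msch): -ENODEV maps to cc 3 (not
 * operational), -EBUSY to cc 2 (busy), 0 to cc 0 (accepted), and any
 * other error to cc 1 (status pending).  A minimal sketch of how that
 * mapping could be factored into a helper is shown below;
 * ioinst_cc_from_errno() is a hypothetical name used for illustration,
 * not an existing QEMU function.
 */
static int ioinst_cc_from_errno(int ret)
{
    switch (ret) {
    case 0:
        return 0;   /* instruction accepted */
    case -EBUSY:
        return 2;   /* subchannel busy */
    case -ENODEV:
        return 3;   /* subchannel not operational */
    default:
        return 1;   /* status pending / other error */
    }
}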
int ioinst_handle_xsch(CPUS390XState *env, uint64_t reg1)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    int ret = -ENODEV;
    int cc;

    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        program_interrupt(env, PGM_OPERAND, 2);
        return -EIO;
    }
    trace_ioinst_sch_id("xsch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        ret = css_do_xsch(sch);
    }
    switch (ret) {
    case -ENODEV:
        cc = 3;
        break;
    case -EBUSY:
        cc = 2;
        break;
    case 0:
        cc = 0;
        break;
    default:
        cc = 1;
        break;
    }
    return cc;
}
void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    uint64_t addr;
    int cc;
    SCHIB schib;
    CPUS390XState *env = &cpu->env;
    uint8_t ar;

    addr = decode_basedisp_s(env, ipb, &ar);
    if (addr & 3) {
        program_interrupt(env, PGM_SPECIFICATION, 2);
        return;
    }
    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        /*
         * As operand exceptions have a lower priority than access exceptions,
         * we check whether the memory area is writable (injecting the
         * access exception if it is not) first.
         */
        if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
            program_interrupt(env, PGM_OPERAND, 2);
        }
        return;
    }
    trace_ioinst_sch_id("stsch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch) {
        if (css_subch_visible(sch)) {
            css_do_stsch(sch, &schib);
            cc = 0;
        } else {
            /* Indicate no more subchannels in this css/ss */
            cc = 3;
        }
    } else {
        if (css_schid_final(m, cssid, ssid, schid)) {
            cc = 3; /* No more subchannels in this css/ss */
        } else {
            /* Store an empty schib. */
            memset(&schib, 0, sizeof(schib));
            cc = 0;
        }
    }
    if (cc != 3) {
        if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
                                    sizeof(schib)) != 0) {
            return;
        }
    } else {
        /* Access exceptions have a higher priority than cc3 */
        if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
            return;
        }
    }
    setcc(cpu, cc);
}
void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    ORB orig_orb, orb;
    uint64_t addr;
    int ret = -ENODEV;
    int cc;
    CPUS390XState *env = &cpu->env;
    uint8_t ar;

    addr = decode_basedisp_s(env, ipb, &ar);
    if (addr & 3) {
        program_interrupt(env, PGM_SPECIFICATION, 2);
        return;
    }
    if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
        return;
    }
    copy_orb_from_guest(&orb, &orig_orb);
    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
        !ioinst_orb_valid(&orb)) {
        program_interrupt(env, PGM_OPERAND, 2);
        return;
    }
    trace_ioinst_sch_id("ssch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        ret = css_do_ssch(sch, &orb);
    }
    switch (ret) {
    case -ENODEV:
        cc = 3;
        break;
    case -EBUSY:
        cc = 2;
        break;
    case -EFAULT:
        /*
         * TODO:
         * I'm wondering whether there is something better
         * to do for us here (like setting some device or
         * subchannel status).
         */
        program_interrupt(env, PGM_ADDRESSING, 4);
        return;
    case 0:
        cc = 0;
        break;
    default:
        cc = 1;
        break;
    }
    setcc(cpu, cc);
}
int ioinst_handle_ssch(CPUS390XState *env, uint64_t reg1, uint32_t ipb)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    ORB *orig_orb, orb;
    uint64_t addr;
    int ret = -ENODEV;
    int cc;
    hwaddr len = sizeof(*orig_orb);

    addr = decode_basedisp_s(env, ipb);
    if (addr & 3) {
        program_interrupt(env, PGM_SPECIFICATION, 2);
        return -EIO;
    }
    orig_orb = s390_cpu_physical_memory_map(env, addr, &len, 0);
    if (!orig_orb || len != sizeof(*orig_orb)) {
        program_interrupt(env, PGM_ADDRESSING, 2);
        cc = -EIO;
        goto out;
    }
    copy_orb_from_guest(&orb, orig_orb);
    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
        !ioinst_orb_valid(&orb)) {
        program_interrupt(env, PGM_OPERAND, 2);
        cc = -EIO;
        goto out;
    }
    trace_ioinst_sch_id("ssch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        ret = css_do_ssch(sch, &orb);
    }
    switch (ret) {
    case -ENODEV:
        cc = 3;
        break;
    case -EBUSY:
        cc = 2;
        break;
    case 0:
        cc = 0;
        break;
    default:
        cc = 1;
        break;
    }

out:
    s390_cpu_physical_memory_unmap(env, orig_orb, len, 0);
    return cc;
}
void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    SCHIB *schib;
    uint64_t addr;
    int ret = -ENODEV;
    int cc;
    hwaddr len = sizeof(*schib);
    CPUS390XState *env = &cpu->env;

    addr = decode_basedisp_s(env, ipb);
    if (addr & 3) {
        program_interrupt(env, PGM_SPECIFICATION, 2);
        return;
    }
    schib = s390_cpu_physical_memory_map(env, addr, &len, 0);
    if (!schib || len != sizeof(*schib)) {
        program_interrupt(env, PGM_ADDRESSING, 2);
        goto out;
    }
    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
        !ioinst_schib_valid(schib)) {
        program_interrupt(env, PGM_OPERAND, 2);
        goto out;
    }
    trace_ioinst_sch_id("msch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        ret = css_do_msch(sch, schib);
    }
    switch (ret) {
    case -ENODEV:
        cc = 3;
        break;
    case -EBUSY:
        cc = 2;
        break;
    case 0:
        cc = 0;
        break;
    default:
        cc = 1;
        break;
    }
    setcc(cpu, cc);

out:
    s390_cpu_physical_memory_unmap(env, schib, len, 0);
}
int ioinst_handle_stsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    uint64_t addr;
    int cc;
    SCHIB *schib;
    hwaddr len = sizeof(*schib);

    addr = decode_basedisp_s(env, ipb);
    if (addr & 3) {
        program_interrupt(env, PGM_SPECIFICATION, 2);
        return -EIO;
    }
    schib = s390_cpu_physical_memory_map(env, addr, &len, 1);
    if (!schib || len != sizeof(*schib)) {
        program_interrupt(env, PGM_ADDRESSING, 2);
        cc = -EIO;
        goto out;
    }
    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        program_interrupt(env, PGM_OPERAND, 2);
        cc = -EIO;
        goto out;
    }
    trace_ioinst_sch_id("stsch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch) {
        if (css_subch_visible(sch)) {
            css_do_stsch(sch, schib);
            cc = 0;
        } else {
            /* Indicate no more subchannels in this css/ss */
            cc = 3;
        }
    } else {
        if (css_schid_final(m, cssid, ssid, schid)) {
            cc = 3; /* No more subchannels in this css/ss */
        } else {
            /* Store an empty schib. */
            memset(schib, 0, sizeof(*schib));
            cc = 0;
        }
    }

out:
    s390_cpu_physical_memory_unmap(env, schib, len, 1);
    return cc;
}
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;

    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
        return;
    }
    trace_ioinst_sch_id("hsch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (!sch || !css_subch_visible(sch)) {
        setcc(cpu, 3);
        return;
    }
    setcc(cpu, css_do_hsch(sch));
}
int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    int cssid, ssid, schid, m;
    SubchDev *sch;
    IRB irb;
    uint64_t addr;
    int cc, irb_len;
    uint8_t ar;

    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        s390_program_interrupt(env, PGM_OPERAND, 4, ra);
        return -EIO;
    }
    trace_ioinst_sch_id("tsch", cssid, ssid, schid);
    addr = decode_basedisp_s(env, ipb, &ar);
    if (addr & 3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
        return -EIO;
    }

    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        cc = css_do_tsch_get_irb(sch, &irb, &irb_len);
    } else {
        cc = 3;
    }
    /* 0 - status pending, 1 - not status pending, 3 - not operational */
    if (cc != 3) {
        if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return -EFAULT;
        }
        css_do_tsch_update_subch(sch);
    } else {
        irb_len = sizeof(irb) - sizeof(irb.emw);
        /* Access exceptions have a higher priority than cc3 */
        if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return -EFAULT;
        }
    }
    setcc(cpu, cc);
    return 0;
}
void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    SCHIB schib;
    uint64_t addr;
    int ret = -ENODEV;
    int cc;
    CPUS390XState *env = &cpu->env;
    uint8_t ar;

    addr = decode_basedisp_s(env, ipb, &ar);
    if (addr & 3) {
        program_interrupt(env, PGM_SPECIFICATION, 2);
        return;
    }
    if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
        return;
    }
    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
        !ioinst_schib_valid(&schib)) {
        program_interrupt(env, PGM_OPERAND, 2);
        return;
    }
    trace_ioinst_sch_id("msch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        ret = css_do_msch(sch, &schib);
    }
    switch (ret) {
    case -ENODEV:
        cc = 3;
        break;
    case -EBUSY:
        cc = 2;
        break;
    case 0:
        cc = 0;
        break;
    default:
        cc = 1;
        break;
    }
    setcc(cpu, cc);
}
static int virtio_ccw_hcall_notify(const uint64_t *args)
{
    uint64_t subch_id = args[0];
    uint64_t queue = args[1];
    SubchDev *sch;
    int cssid, ssid, schid, m;

    if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) {
        return -EINVAL;
    }
    sch = css_find_subch(m, cssid, ssid, schid);
    if (!sch || !css_subch_visible(sch)) {
        return -EINVAL;
    }
    if (queue >= VIRTIO_CCW_QUEUE_MAX) {
        return -EINVAL;
    }
    virtio_queue_notify(virtio_ccw_get_vdev(sch), queue);
    return 0;
}
int ioinst_handle_tsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    IRB *irb;
    uint64_t addr;
    int ret = -ENODEV;
    int cc;
    hwaddr len = sizeof(*irb);

    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        program_interrupt(env, PGM_OPERAND, 2);
        return -EIO;
    }
    trace_ioinst_sch_id("tsch", cssid, ssid, schid);
    addr = decode_basedisp_s(env, ipb);
    if (addr & 3) {
        program_interrupt(env, PGM_SPECIFICATION, 2);
        return -EIO;
    }
    irb = s390_cpu_physical_memory_map(env, addr, &len, 1);
    if (!irb || len != sizeof(*irb)) {
        program_interrupt(env, PGM_ADDRESSING, 2);
        cc = -EIO;
        goto out;
    }
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        ret = css_do_tsch(sch, irb);
        /* 0 - status pending, 1 - not status pending */
        cc = ret;
    } else {
        cc = 3;
    }

out:
    s390_cpu_physical_memory_unmap(env, irb, sizeof(*irb), 1);
    return cc;
}
void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    int ret = -ENODEV;
    int cc;

    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        program_interrupt(&cpu->env, PGM_OPERAND, 2);
        return;
    }
    trace_ioinst_sch_id("csch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        ret = css_do_csch(sch);
    }
    if (ret == -ENODEV) {
        cc = 3;
    } else {
        cc = 0;
    }
    setcc(cpu, cc);
}
int ioinst_handle_csch(CPUS390XState *env, uint64_t reg1)
{
    int cssid, ssid, schid, m;
    SubchDev *sch;
    int ret = -ENODEV;
    int cc;

    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
        program_interrupt(env, PGM_OPERAND, 2);
        return -EIO;
    }
    trace_ioinst_sch_id("csch", cssid, ssid, schid);
    sch = css_find_subch(m, cssid, ssid, schid);
    if (sch && css_subch_visible(sch)) {
        ret = css_do_csch(sch);
    }
    if (ret == -ENODEV) {
        cc = 3;
    } else {
        cc = 0;
    }
    return cc;
}