Example #1
0
void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1);
    qemu_mutex_unlock_iothread();
}
Example #2
0
/* Canonicalize the current cpu's features into the 64-bit words required
   by STFLE.  Return the 0-based index of the highest non-zero word, i.e.
   one less than the number of doublewords needed.  */
static unsigned do_stfle(CPUS390XState *env, uint64_t words[MAX_STFL_WORDS])
{
    S390CPU *cpu = s390_env_get_cpu(env);
    const unsigned long *features = cpu->model->features;
    unsigned max_bit = 0;
    S390Feat feat;

    memset(words, 0, sizeof(uint64_t) * MAX_STFL_WORDS);

    if (test_bit(S390_FEAT_ZARCH, features)) {
        /* z/Architecture is always active if around */
        words[0] = 1ull << (63 - 2);
    }

    for (feat = find_first_bit(features, S390_FEAT_MAX);
         feat < S390_FEAT_MAX;
         feat = find_next_bit(features, S390_FEAT_MAX, feat + 1)) {
        const S390FeatDef *def = s390_feat_def(feat);
        if (def->type == S390_FEAT_TYPE_STFL) {
            unsigned bit = def->bit;
            if (bit > max_bit) {
                max_bit = bit;
            }
            assert(bit / 64 < MAX_STFL_WORDS);
            words[bit / 64] |= 1ULL << (63 - bit % 64);
        }
    }

    return max_bit / 64;
}
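The mapping above places facility bit N into words[N / 64], counting bits from the most significant end of each doubleword. A minimal standalone sketch of the same arithmetic (the helper name and the test harness below are illustrative, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper, not a QEMU function: set facility bit `bit` using
   the same MSB-first layout as do_stfle() above. */
static void set_facility_bit(uint64_t *words, unsigned bit)
{
    words[bit / 64] |= 1ULL << (63 - bit % 64);
}

int main(void)
{
    uint64_t words[2] = { 0, 0 };

    set_facility_bit(words, 2);   /* z/Architecture, as in do_stfle() */
    set_facility_bit(words, 76);  /* an arbitrary bit in the second word */

    printf("word 0 = %016llx\n", (unsigned long long)words[0]);
    printf("word 1 = %016llx\n", (unsigned long long)words[1]);
    /* prints: word 0 = 2000000000000000, word 1 = 0008000000000000 */
    return 0;
}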
Example #3
0
void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16);
    qemu_mutex_unlock_iothread();
}
Example #4
0
void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);

    if (kvm_enabled()) {
#ifdef CONFIG_KVM
        struct kvm_s390_irq irq = {
            .type = KVM_S390_PROGRAM_INT,
            .u.pgm.code = code,
        };

        kvm_s390_vcpu_interrupt(cpu, &irq);
#endif
    } else {
        CPUState *cs = CPU(cpu);

        env->int_pgm_code = code;
        env->int_pgm_ilen = ilen;
        cs->exception_index = EXCP_PGM;
        cpu_loop_exit(cs);
    }
}
Example #5
0
/* Raise an exception statically from a TB.  */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
Example #6
0
/* Make sure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}
Example #7
0
/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t prefix = a1 & 0x7fffe000;

    env->psa = prefix;
    qemu_log("prefix: %#x\n", prefix);
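    /* Prefixing remaps the first 8K of real storage (two 4K pages), so the
       cached translations for pages 0 and TARGET_PAGE_SIZE are now stale. */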
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
}
Example #8
0
uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint64_t cpu_addr)
{
    int cc = SIGP_CC_ORDER_CODE_ACCEPTED;

    HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
               __func__, order_code, r1, cpu_addr);

    /* Remember: Use "R1 or R1 + 1, whichever is the odd-numbered register"
       as parameter (input). Status (output) is always R1. */

    switch (order_code) {
    case SIGP_SET_ARCH:
        /* switch arch */
        break;
    case SIGP_SENSE:
        /* enumerate CPU status */
        if (cpu_addr) {
            /* XXX implement when SMP comes */
            return 3;
        }
        env->regs[r1] &= 0xffffffff00000000ULL;
        cc = 1;
        break;
#if !defined(CONFIG_USER_ONLY)
    case SIGP_RESTART:
        qemu_system_reset_request();
        cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        break;
    case SIGP_STOP:
        qemu_system_shutdown_request();
        cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        break;
#endif
    default:
        /* unknown sigp */
        fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
        cc = SIGP_CC_NOT_OPERATIONAL;
    }

    return cc;
}
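The even/odd register convention noted in the comment means the parameter register can be derived from r1 by setting its low bit. A small standalone illustration (sigp_param_reg() is a name made up here, not part of QEMU):

#include <stdio.h>

/* Illustrative only: the SIGP parameter is taken from the odd-numbered
   register of the even-odd pair r1 / r1 + 1, while status goes to r1. */
static unsigned sigp_param_reg(unsigned r1)
{
    return r1 | 1;   /* the odd register of the pair */
}

int main(void)
{
    printf("r1 = 6 -> parameter in r%u, status in r6\n", sigp_param_reg(6));
    printf("r1 = 7 -> parameter in r%u, status in r7\n", sigp_param_reg(7));
    return 0;
}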
Example #9
0
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    uint64_t addr = env->regs[r1];
    uint64_t subcode = env->regs[r3];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, ILEN_LATER_INC);
        return;
    }

    if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
        program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
        return;
    }

    switch (subcode) {
    case 0:
        modified_clear_reset(s390_env_get_cpu(env));
        break;
    case 1:
        load_normal_reset(s390_env_get_cpu(env));
        break;
    case 5:
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        env->regs[r1 + 1] = DIAG_308_RC_INVALID;
        return;
    case 6:
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
        return;
    default:
        hw_error("Unhandled diag308 subcode %" PRIx64, subcode);
        break;
    }
}
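The subcode 5 and 6 checks require an even register number and a 4 KiB-aligned parameter block address before anything else is done. A standalone sketch of just that validation (diag308_args_valid() is a hypothetical name used for illustration):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the (r1 & 1) || (addr & 0x0fffULL) test above: r1 must be even
   and addr must be 4 KiB aligned, else a specification exception is due. */
static bool diag308_args_valid(uint64_t r1, uint64_t addr)
{
    return !(r1 & 1) && !(addr & 0x0fffULL);
}

int main(void)
{
    printf("%d\n", diag308_args_valid(2, 0x20000));  /* 1: even reg, aligned */
    printf("%d\n", diag308_args_valid(3, 0x20000));  /* 0: odd register */
    printf("%d\n", diag308_args_valid(2, 0x20800));  /* 0: not 4 KiB aligned */
    return 0;
}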
Example #10
0
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;

    /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
    if (!tcg_enabled()) {
        return;
    }
    env->cc_op = (mask >> 44) & 3;

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        s390_handle_wait(s390_env_get_cpu(env));
    }
}
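The (mask >> 44) & 3 expression pulls the condition code out of PSW mask bits 18-19. A tiny standalone check of that extraction (psw_mask_to_cc() is an illustrative name, not a QEMU symbol):

#include <stdint.h>
#include <stdio.h>

/* The condition code sits in PSW bits 18-19, i.e. bits 45-44 counted from
   the least significant end of the 64-bit mask. */
static unsigned psw_mask_to_cc(uint64_t mask)
{
    return (mask >> 44) & 3;
}

int main(void)
{
    uint64_t mask = 3ULL << 44;                 /* CC = 3, other bits clear */
    printf("cc = %u\n", psw_mask_to_cc(mask));  /* prints: cc = 3 */
    return 0;
}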
Example #11
0
void HELPER(per_check_exception)(CPUS390XState *env)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, env->per_address));

        cs->exception_index = EXCP_PGM;
        cpu_loop_exit(cs);
    }
}
Example #12
0
void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
                            uintptr_t ra)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    if (kvm_enabled()) {
        kvm_s390_program_interrupt(cpu, code);
    } else if (tcg_enabled()) {
        tcg_s390_program_interrupt(env, code, ilen, ra);
    } else {
        g_assert_not_reached();
    }
}
Example #13
0
LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}
Example #14
0
/* Raise an exception dynamically from a helper function.  */
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
                                     uintptr_t retaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = excp;
    env->int_pgm_ilen = ILEN_AUTO;

    /* Use the (ultimate) caller's address to find the insn that trapped.  */
    cpu_restore_state(cs, retaddr);

    cpu_loop_exit(cs);
}
Example #15
0
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}
Example #16
0
void program_interrupt(CPUS390XState *env, uint32_t code, int ilc)
{
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);

    if (kvm_enabled()) {
#ifdef CONFIG_KVM
        kvm_s390_interrupt(s390_env_get_cpu(env), KVM_S390_PROGRAM_INT, code);
#endif
    } else {
        env->int_pgm_code = code;
        env->int_pgm_ilc = ilc;
        env->exception_index = EXCP_PGM;
        cpu_loop_exit(env);
    }
}
Example #17
0
void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = CPU(s390_env_get_cpu(env));

            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}
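get_per_in_range() is not shown here; the sketch below only illustrates the kind of test it is assumed to perform, namely checking addr against a PER range held in control registers 10 (start) and 11 (end), with wrap-around when the start lies above the end. Treat the details as an assumption rather than a quote of the QEMU source.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical re-creation of a PER range test: the range wraps around
   the address space when start > end. */
static bool per_addr_in_range(uint64_t start, uint64_t end, uint64_t addr)
{
    if (start <= end) {
        return start <= addr && addr <= end;
    }
    return addr >= start || addr <= end;   /* wrapped range */
}

int main(void)
{
    printf("%d\n", per_addr_in_range(0x1000, 0x2000, 0x1800));        /* 1 */
    printf("%d\n", per_addr_in_range(0xffffff00ULL, 0x100, 0x80));    /* 1 */
    printf("%d\n", per_addr_in_range(0xffffff00ULL, 0x100, 0x8000));  /* 0 */
    return 0;
}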
Example #18
0
/* Raise an exception dynamically from a helper function.  */
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
                                     uintptr_t retaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    int t;

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = excp;

    /* Use the (ultimate) caller's address to find the insn that trapped.  */
    cpu_restore_state(cs, retaddr);

    /* Advance past the insn.  */
    t = cpu_ldub_code(env, env->psw.addr);
    env->int_pgm_ilen = t = get_ilen(t);
    env->psw.addr += t;

    cpu_loop_exit(cs);
}
Example #19
0
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
Example #20
0
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
Example #21
0
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
Example #22
0
static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
                                  uint64_t src, uint64_t dst, uint64_t vr)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t r = 0;

    switch (cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* cc_op value _is_ cc */
        r = cc_op;
        break;
    case CC_OP_LTGT0_32:
        r = cc_calc_ltgt0_32(dst);
        break;
    case CC_OP_LTGT0_64:
        r =  cc_calc_ltgt0_64(dst);
        break;
    case CC_OP_LTGT_32:
        r =  cc_calc_ltgt_32(src, dst);
        break;
    case CC_OP_LTGT_64:
        r =  cc_calc_ltgt_64(src, dst);
        break;
    case CC_OP_LTUGTU_32:
        r =  cc_calc_ltugtu_32(src, dst);
        break;
    case CC_OP_LTUGTU_64:
        r =  cc_calc_ltugtu_64(src, dst);
        break;
    case CC_OP_TM_32:
        r =  cc_calc_tm_32(src, dst);
        break;
    case CC_OP_TM_64:
        r =  cc_calc_tm_64(src, dst);
        break;
    case CC_OP_NZ:
        r =  cc_calc_nz(dst);
        break;
    case CC_OP_ADD_64:
        r =  cc_calc_add_64(src, dst, vr);
        break;
    case CC_OP_ADDU_64:
        r =  cc_calc_addu_64(src, dst, vr);
        break;
    case CC_OP_ADDC_64:
        r =  cc_calc_addc_64(src, dst, vr);
        break;
    case CC_OP_SUB_64:
        r =  cc_calc_sub_64(src, dst, vr);
        break;
    case CC_OP_SUBU_64:
        r =  cc_calc_subu_64(src, dst, vr);
        break;
    case CC_OP_SUBB_64:
        r =  cc_calc_subb_64(src, dst, vr);
        break;
    case CC_OP_ABS_64:
        r =  cc_calc_abs_64(dst);
        break;
    case CC_OP_NABS_64:
        r =  cc_calc_nabs_64(dst);
        break;
    case CC_OP_COMP_64:
        r =  cc_calc_comp_64(dst);
        break;

    case CC_OP_ADD_32:
        r =  cc_calc_add_32(src, dst, vr);
        break;
    case CC_OP_ADDU_32:
        r =  cc_calc_addu_32(src, dst, vr);
        break;
    case CC_OP_ADDC_32:
        r =  cc_calc_addc_32(src, dst, vr);
        break;
    case CC_OP_SUB_32:
        r =  cc_calc_sub_32(src, dst, vr);
        break;
    case CC_OP_SUBU_32:
        r =  cc_calc_subu_32(src, dst, vr);
        break;
    case CC_OP_SUBB_32:
        r =  cc_calc_subb_32(src, dst, vr);
        break;
    case CC_OP_ABS_32:
        r =  cc_calc_abs_32(dst);
        break;
    case CC_OP_NABS_32:
        r =  cc_calc_nabs_32(dst);
        break;
    case CC_OP_COMP_32:
        r =  cc_calc_comp_32(dst);
        break;

    case CC_OP_ICM:
        r =  cc_calc_icm(src, dst);
        break;
    case CC_OP_SLA_32:
        r =  cc_calc_sla_32(src, dst);
        break;
    case CC_OP_SLA_64:
        r =  cc_calc_sla_64(src, dst);
        break;
    case CC_OP_FLOGR:
        r = cc_calc_flogr(dst);
        break;

    case CC_OP_NZ_F32:
        r = set_cc_nz_f32(dst);
        break;
    case CC_OP_NZ_F64:
        r = set_cc_nz_f64(dst);
        break;
    case CC_OP_NZ_F128:
        r = set_cc_nz_f128(make_float128(src, dst));
        break;

    default:
        cpu_abort(CPU(cpu), "Unknown CC operation: %s\n", cc_name(cc_op));
    }

    HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
               cc_name(cc_op), src, dst, vr, r);
    return r;
}
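As a concrete instance of what the cc_calc_* helpers compute, the signed compare case (CC_OP_LTGT_32) yields condition code 0 for equal operands, 1 when the first operand is low and 2 when it is high. A standalone re-statement for illustration only (calc_ltgt_32() below is not the QEMU source):

#include <stdint.h>
#include <stdio.h>

/* Illustrative version of the signed 32-bit compare condition code:
   0 = equal, 1 = first operand low, 2 = first operand high. */
static uint32_t calc_ltgt_32(int32_t src, int32_t dst)
{
    return src == dst ? 0 : (src < dst ? 1 : 2);
}

int main(void)
{
    printf("%u %u %u\n", calc_ltgt_32(1, 1), calc_ltgt_32(-5, 7),
           calc_ltgt_32(7, -5));   /* prints: 0 1 2 */
    return 0;
}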
Example #23
0
void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_chsc(cpu, inst >> 16);
}
Example #24
0
void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_tsch(cpu, r1, inst >> 16);
}
Example #25
0
void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_rsch(cpu, r1);
}
Example #26
0
void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
    cpu_loop_exit(CPU(s390_env_get_cpu(env)));
}
Example #27
0
/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
                      uint64_t r0, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    int cc = 0;
    int sel1, sel2;

    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
        /* valid function code, invalid reserved bits */
        program_interrupt(env, PGM_SPECIFICATION, 4);
    }

    sel1 = r0 & STSI_R0_SEL1_MASK;
    sel2 = r1 & STSI_R1_SEL2_MASK;

    /* XXX: spec exception if sysib is not 4k-aligned */

    switch (r0 & STSI_LEVEL_MASK) {
    case STSI_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            struct sysib_111 sysib;
            char type[5] = {};

            memset(&sysib, 0, sizeof(sysib));
            ebcdic_put(sysib.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.model, "QEMU            ", 16);
            ebcdic_put(sysib.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            struct sysib_121 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            struct sysib_122 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stl_p(&sysib.capability, 0x443afc29);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.active_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_2:
        {
            if ((sel1 == 2) && (sel2 == 1)) {
                /* LPAR CPU */
                struct sysib_221 sysib;

                memset(&sysib, 0, sizeof(sysib));
                /* XXX make different for different CPUs? */
                ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
                ebcdic_put(sysib.plant, "QEMU", 4);
                stw_p(&sysib.cpu_addr, env->cpu_num);
                stw_p(&sysib.cpu_id, 0);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else if ((sel1 == 2) && (sel2 == 2)) {
                /* LPAR CPUs */
                struct sysib_222 sysib;

                memset(&sysib, 0, sizeof(sysib));
                stw_p(&sysib.lpar_num, 0);
                sysib.lcpuc = 0;
                /* XXX change when SMP comes */
                stw_p(&sysib.total_cpus, 1);
                stw_p(&sysib.conf_cpus, 1);
                stw_p(&sysib.standby_cpus, 0);
                stw_p(&sysib.reserved_cpus, 0);
                ebcdic_put(sysib.name, "QEMU    ", 8);
                stl_p(&sysib.caf, 1000);
                stw_p(&sysib.dedicated_cpus, 0);
                stw_p(&sysib.shared_cpus, 0);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_3:
        {
            if ((sel1 == 2) && (sel2 == 2)) {
                /* VM CPUs */
                struct sysib_322 sysib;

                memset(&sysib, 0, sizeof(sysib));
                sysib.count = 1;
                /* XXX change when SMP comes */
                stw_p(&sysib.vm[0].total_cpus, 1);
                stw_p(&sysib.vm[0].conf_cpus, 1);
                stw_p(&sysib.vm[0].standby_cpus, 0);
                stw_p(&sysib.vm[0].reserved_cpus, 0);
                ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
                stl_p(&sysib.vm[0].caf, 1000);
                ebcdic_put(sysib.vm[0].cpi, "KVM/Linux       ", 16);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_CURRENT:
        env->regs[0] = STSI_LEVEL_3;
        break;
    default:
        cc = 3;
        break;
    }

    return cc;
}
Example #28
0
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    uint64_t addr = env->regs[r1];
    uint64_t subcode = env->regs[r3];
    IplParameterBlock *iplb;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, ILEN_LATER_INC);
        return;
    }

    if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
        program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
        return;
    }

    switch (subcode) {
    case 0:
        modified_clear_reset(s390_env_get_cpu(env));
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 1:
        load_normal_reset(s390_env_get_cpu(env));
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 3:
        s390_reipl_request();
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 5:
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        if (!address_space_access_valid(&address_space_memory, addr,
                                        sizeof(IplParameterBlock), false)) {
            program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
            return;
        }
        iplb = g_malloc0(sizeof(struct IplParameterBlock));
        cpu_physical_memory_read(addr, iplb, sizeof(struct IplParameterBlock));
        if (!s390_ipl_update_diag308(iplb)) {
            env->regs[r1 + 1] = DIAG_308_RC_OK;
        } else {
            env->regs[r1 + 1] = DIAG_308_RC_INVALID;
        }
        g_free(iplb);
        return;
    case 6:
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        if (!address_space_access_valid(&address_space_memory, addr,
                                        sizeof(IplParameterBlock), true)) {
            program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
            return;
        }
        iplb = s390_ipl_get_iplb();
        if (iplb) {
            cpu_physical_memory_write(addr, iplb,
                                      sizeof(struct IplParameterBlock));
            env->regs[r1 + 1] = DIAG_308_RC_OK;
        } else {
            env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
        }
        return;
    default:
        hw_error("Unhandled diag308 subcode %" PRIx64, subcode);
        break;
    }
}