static int vtimer_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr) { struct hsr_cp64 cp64 = hsr.cp64; uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1); uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2); uint64_t x = (uint64_t)(*r1) | ((uint64_t)(*r2) << 32); if ( cp64.read ) perfc_incr(vtimer_cp64_reads); else perfc_incr(vtimer_cp64_writes); switch ( hsr.bits & HSR_CP64_REGS_MASK ) { case HSR_CPREG64(CNTP_CVAL): if ( !vtimer_cntp_cval(regs, &x, cp64.read) ) return 0; break; default: return 0; } if ( cp64.read ) { *r1 = (uint32_t)(x & 0xffffffff); *r2 = (uint32_t)(x >> 32); }
/*
 * Dispatch a trapped guest MMIO access to the registered handler whose
 * [addr, addr + size) range covers the faulting guest physical address.
 *
 * Returns the handler's result (non-zero = handled), or 0 when no
 * registered handler claims the address.
 */
int handle_mmio(mmio_info_t *info)
{
    struct vcpu *v = current;
    const struct vmmio *vmmio = &v->domain->arch.vmmio;
    const struct mmio_handler *handler = NULL;
    struct hsr_dabt dabt = info->dabt;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    register_t *r = select_user_reg(regs, dabt.reg);
    int i;

    /* Linear scan of the registered handlers for one covering the GPA. */
    for ( i = 0; i < vmmio->num_entries; i++ )
    {
        const struct mmio_handler *h = &vmmio->handlers[i];

        if ( info->gpa >= h->addr && info->gpa < h->addr + h->size )
        {
            handler = h;
            break;
        }
    }

    if ( handler == NULL )
        return 0;

    return info->dabt.write ? handler->ops->write(v, info, *r, handler->priv)
                            : handle_read(handler, v, info, r);
}
static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) { struct vcpu *v = current; struct hsr_cp32 cp32 = hsr.cp32; uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg); s_time_t now; switch ( hsr.bits & HSR_CP32_REGS_MASK ) { case HSR_CPREG32(CNTP_CTL): if ( cp32.read ) { *r = v->arch.phys_timer.ctl; } else { uint32_t ctl = *r & ~CNTx_CTL_PENDING; if ( ctl & CNTx_CTL_ENABLE ) ctl |= v->arch.phys_timer.ctl & CNTx_CTL_PENDING; v->arch.phys_timer.ctl = ctl; if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE ) { set_timer(&v->arch.phys_timer.timer, v->arch.phys_timer.cval + v->domain->arch.phys_timer_base.offset); } else stop_timer(&v->arch.phys_timer.timer); } return 1; case HSR_CPREG32(CNTP_TVAL): now = NOW() - v->domain->arch.phys_timer_base.offset; if ( cp32.read ) { *r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & 0xffffffffull); } else { v->arch.phys_timer.cval = now + ticks_to_ns(*r); if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE ) { v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING; set_timer(&v->arch.phys_timer.timer, v->arch.phys_timer.cval + v->domain->arch.phys_timer_base.offset); } } return 1; default: return 0; } }
/*
 * Emulate 64-bit CP15 accesses to the virtual timer count register
 * (CNTPCT).  The 64-bit count is returned through two 32-bit guest
 * registers (reg1 = low word, reg2 = high word).
 *
 * NOTE(review): this function is truncated in the visible source —
 * the else branch (guest write to CNTPCT) and the remainder of the
 * switch/function are not shown, so only the read path is documented.
 */
static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr)
{
    struct vcpu *v = current;
    struct hsr_cp64 cp64 = hsr.cp64;
    uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1);
    uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2);
    uint64_t ticks;
    s_time_t now;

    switch ( hsr.bits & HSR_CP64_REGS_MASK )
    {
    case HSR_CPREG64(CNTPCT):
        if ( cp64.read )
        {
            /* Guest-visible count = (host time - per-vcpu offset), in ticks. */
            now = NOW() - v->arch.phys_timer.offset;
            ticks = ns_to_ticks(now);
            *r1 = (uint32_t)(ticks & 0xffffffff);
            *r2 = (uint32_t)(ticks >> 32);
            return 1;
        }
        else
        {
/*
 * Emulate a 64-bit CP15 coprocessor access to the physical counter
 * (CNTPCT).  The counter value produced by vtimer_cntpct() is split
 * across two 32-bit guest registers (reg1 = low word, reg2 = high word).
 *
 * Returns 1 if the access was emulated, 0 for unhandled registers or
 * when vtimer_cntpct() rejects the access.
 *
 * NOTE(review): the original text was missing the function's closing
 * brace (only the switch was closed); it is restored here.
 */
static int vtimer_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr)
{
    struct hsr_cp64 cp64 = hsr.cp64;
    uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1);
    uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2);
    uint64_t x;

    switch ( hsr.bits & HSR_CP64_REGS_MASK )
    {
    case HSR_CPREG64(CNTPCT):
        if ( !vtimer_cntpct(regs, &x, cp64.read) )
            return 0;

        /* On a read, write the counter back to the guest register pair. */
        if ( cp64.read )
        {
            *r1 = (uint32_t)(x & 0xffffffff);
            *r2 = (uint32_t)(x >> 32);
        }
        return 1;
    default:
        return 0;
    }
}
/*
 * Handle a guest write to the emulated UART region.
 *
 * Only a write to the data register emits a character; writes to any
 * other offset are silently discarded.  Always returns 1 (handled).
 */
static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, void *priv)
{
    struct domain *d = v->domain;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    register_t *reg = select_user_reg(regs, info->dabt.reg);
    paddr_t off = info->gpa - d->arch.vuart.info->base_addr;

    perfc_incr(vuart_writes);

    if ( off == d->arch.vuart.info->data_off )
        /* Only the low byte is the character; ignore any status bits. */
        vuart_print_char(v, *reg & 0xFF);

    return 1;
}
/*
 * Handle a guest read from the emulated UART region.
 *
 * The status register returns the fixed status value (holding registers
 * empty, ready to send, etc.); every other offset reads as zero.
 * Always returns 1 (handled).
 */
static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, void *priv)
{
    struct domain *d = v->domain;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    register_t *reg = select_user_reg(regs, info->dabt.reg);
    paddr_t off = info->gpa - d->arch.vuart.info->base_addr;

    perfc_incr(vuart_reads);

    /* Unknown offsets read as zero. */
    *reg = ( off == d->arch.vuart.info->status_off )
           ? d->arch.vuart.info->status
           : 0;

    return 1;
}
/*
 * Decode 32-bit CP15 timer accesses and forward them to the dedicated
 * CNTP_CTL / CNTP_TVAL emulation helpers.
 *
 * Returns 1 when the register was handled, 0 otherwise.
 */
static int vtimer_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr)
{
    struct hsr_cp32 cp32 = hsr.cp32;
    uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg);
    int handled = 1;

    switch ( hsr.bits & HSR_CP32_REGS_MASK )
    {
    case HSR_CPREG32(CNTP_CTL):
        vtimer_cntp_ctl(regs, r, cp32.read);
        break;
    case HSR_CPREG32(CNTP_TVAL):
        vtimer_cntp_tval(regs, r, cp32.read);
        break;
    default:
        handled = 0;
        break;
    }

    return handled;
}
/*
 * Emulate guest reads of the PL011 UART at UART0_START.
 *
 * Only the data (UARTDR) and flag (UARTFR) registers are implemented:
 * UARTDR reads as 0 (no input pending) and UARTFR reports the
 * transmitter empty/ready.  Any other offset crashes the domain.
 * Returns 1 when the access was handled (domain_crash_synchronous()
 * does not return).
 */
static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info)
{
    struct hsr_dabt dabt = info->dabt;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    /*
     * Cast added for consistency with the other emulation handlers:
     * select_user_reg() returns a register_t *, accessed here as a
     * 32-bit register.
     */
    uint32_t *r = (uint32_t *)select_user_reg(regs, dabt.reg);
    int offset = (int)(info->gpa - UART0_START);

    switch ( offset )
    {
    case UARTDR:
        *r = 0;      /* no received data available */
        return 1;
    case UARTFR:
        *r = 0x87;   /* all holding registers empty, ready to send etc */
        return 1;
    default:
        printk("VPL011: unhandled read r%d offset %#08x\n",
               dabt.reg, offset);
        domain_crash_synchronous();
    }
}
/*
 * Emulate guest writes to the PL011 UART at UART0_START.
 *
 * Writes to the data register (UARTDR) print the low byte; writes to
 * the flag register (UARTFR) are silently ignored.  Any other offset
 * crashes the domain.  Returns 1 when the access was handled
 * (domain_crash_synchronous() does not return).
 */
static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
{
    struct hsr_dabt dabt = info->dabt;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    /*
     * Cast added for consistency with the other emulation handlers:
     * select_user_reg() returns a register_t *, accessed here as a
     * 32-bit register.
     */
    uint32_t *r = (uint32_t *)select_user_reg(regs, dabt.reg);
    int offset = (int)(info->gpa - UART0_START);

    switch ( offset )
    {
    case UARTDR:
        /* Only the low byte is the character; ignore any status bits. */
        uart0_print_char((int)((*r) & 0xFF));
        return 1;
    case UARTFR:
        /* Silently ignore */
        return 1;
    default:
        printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n",
               dabt.reg, *r, offset);
        domain_crash_synchronous();
    }
}