/*
 * Emulate a guest read from the virtual PL011 UART (vpl011).
 * Only the data register and the flag register are implemented;
 * any other offset crashes the offending domain.
 */
static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info)
{
    struct hsr_dabt dabt = info->dabt;
    uint32_t *reg = select_user_reg(guest_cpu_user_regs(), dabt.reg);
    int offset = (int)(info->gpa - UART0_START);

    if ( offset == UARTDR )
    {
        /* No input is ever pending: reads of the data register return 0. */
        *reg = 0;
        return 1;
    }

    if ( offset == UARTFR )
    {
        /* All holding registers empty, ready to send etc */
        *reg = 0x87;
        return 1;
    }

    printk("VPL011: unhandled read r%d offset %#08x\n", dabt.reg, offset);
    domain_crash_synchronous();
}
/*
 * Emulate a guest write to the virtual PL011 UART (vpl011).
 * Data-register writes are forwarded to the console one character at a
 * time; flag-register writes are ignored; anything else crashes the domain.
 */
static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info)
{
    struct hsr_dabt dabt = info->dabt;
    uint32_t *reg = select_user_reg(guest_cpu_user_regs(), dabt.reg);
    int offset = (int)(info->gpa - UART0_START);

    if ( offset == UARTDR )
    {
        /* ignore any status bits */
        uart0_print_char((int)((*reg) & 0xFF));
        return 1;
    }

    if ( offset == UARTFR )
    {
        /* Silently ignore */
        return 1;
    }

    printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n",
           dabt.reg, *reg, offset);
    domain_crash_synchronous();
}
void vm_resume_fail(unsigned long eflags) { unsigned long error = __vmread(VM_INSTRUCTION_ERROR); printk("<vm_resume_fail> error code %lx\n", error); domain_crash_synchronous(); }
/*
 * Dispatch an MMIO ioreq to the supplied device read/write handlers,
 * emulating plain copies (including scatter/gather via guest-physical
 * data buffers) and the read-modify-write ops AND/ADD/OR/XOR/XCHG/SUB.
 *
 * For the RMW ops, the value read from the device is always latched into
 * p->data so the caller can complete the instruction's read half; the
 * write half only happens when p->dir == IOREQ_WRITE.
 */
static inline void hvm_mmio_access(struct vcpu *v,
                                   ioreq_t *p,
                                   hvm_mmio_read_t read_handler,
                                   hvm_mmio_write_t write_handler)
{
    /*
     * Must be unsigned long, not unsigned int: read_handler() returns
     * unsigned long and write_handler()/p->data carry unsigned long, so
     * 32-bit temporaries would silently truncate 64-bit MMIO values in
     * every read-modify-write case below.
     */
    unsigned long tmp1, tmp2;
    /*
     * Zero-initialized: hvm_copy_from_guest_phys() only fills p->size
     * bytes, so without this a short copy would hand stack garbage in the
     * upper bytes to write_handler().
     */
    unsigned long data = 0;

    switch ( p->type )
    {
    case IOREQ_TYPE_COPY:
    {
        if ( !p->data_is_ptr )
        {
            /* Immediate data: a single read or write of p->size bytes. */
            if ( p->dir == IOREQ_READ )
                p->data = read_handler(v, p->addr, p->size);
            else /* p->dir == IOREQ_WRITE */
                write_handler(v, p->addr, p->size, p->data);
        }
        else
        {
            /*
             * p->data_is_ptr: p->data is a guest-physical buffer address.
             * Move p->count elements, walking both the MMIO address and
             * the buffer backwards when the direction flag (DF) is set.
             */
            int i, sign = (p->df) ? -1 : 1;

            if ( p->dir == IOREQ_READ )
            {
                for ( i = 0; i < p->count; i++ )
                {
                    data = read_handler(v, p->addr + (sign * i * p->size),
                                        p->size);
                    (void)hvm_copy_to_guest_phys(
                        p->data + (sign * i * p->size), &data, p->size);
                }
            }
            else
            {
                /* p->dir == IOREQ_WRITE */
                for ( i = 0; i < p->count; i++ )
                {
                    (void)hvm_copy_from_guest_phys(
                        &data, p->data + (sign * i * p->size), p->size);
                    write_handler(v, p->addr + (sign * i * p->size),
                                  p->size, data);
                }
            }
        }
        break;
    }

    case IOREQ_TYPE_AND:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE )
        {
            tmp2 = tmp1 & (unsigned long)p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_ADD:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE )
        {
            tmp2 = tmp1 + (unsigned long)p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_OR:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE )
        {
            tmp2 = tmp1 | (unsigned long)p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_XOR:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE )
        {
            tmp2 = tmp1 ^ (unsigned long)p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_XCHG:
        /*
         * Note that we don't need to be atomic here since VCPU is accessing
         * its own local APIC.
         */
        tmp1 = read_handler(v, p->addr, p->size);
        write_handler(v, p->addr, p->size, (unsigned long)p->data);
        p->data = tmp1;
        break;

    case IOREQ_TYPE_SUB:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE )
        {
            tmp2 = tmp1 - (unsigned long)p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    default:
        printk("hvm_mmio_access: error ioreq type %x\n", p->type);
        domain_crash_synchronous();
        break;
    }
}