/*
 * Decode the guest instruction at @pc that caused an MMIO exit.
 *
 * Walks the instruction byte by byte, remapping the code page on every step
 * so that instructions crossing a page boundary are handled. Only a narrow
 * subset is supported: an optional single REGR prefix followed by
 * MOV to/from memory (32-bit operand), with either a mod=0 SIB form
 * (ss=0, no index, disp32 base) or a mod=2 disp32 form.
 *
 * @cpu_data:   per-CPU state passed through to map_code_page()
 * @pc:         guest-virtual address of the faulting instruction
 * @pg_structs: guest paging structures used to translate @pc
 * @is_write:   access direction reported by the virtualization support;
 *              must match the decoded instruction
 *
 * Returns the decoded access descriptor; access.inst_len == 0 signals a
 * decoding failure (unmappable page, unsupported instruction, or a
 * direction mismatch), reported via panic_printk.
 */
struct mmio_access mmio_parse(struct per_cpu *cpu_data, unsigned long pc,
			      const struct guest_paging_structures *pg_structs,
			      bool is_write)
{
	struct mmio_access access = { .inst_len = 0 };
	bool has_regr, has_modrm, does_write;
	struct modrm modrm;
	struct sib sib;
	u8 *page = NULL;

	has_regr = false;
restart:
	/* remap on every byte - the instruction may cross a page boundary */
	page = map_code_page(cpu_data, pg_structs, pc, page);
	if (!page)
		goto error_nopage;

	has_modrm = false;
	switch (page[pc & PAGE_OFFS_MASK]) {
	case X86_OP_REGR_PREFIX:
		/* at most one prefix is accepted */
		if (has_regr)
			goto error_unsupported;
		has_regr = true;
		pc++;
		access.inst_len++;
		goto restart;
	case X86_OP_MOV_TO_MEM:
		access.inst_len += 2;	/* opcode + modrm */
		access.size = 4;
		has_modrm = true;
		does_write = true;
		break;
	case X86_OP_MOV_FROM_MEM:
		access.inst_len += 2;	/* opcode + modrm */
		access.size = 4;
		has_modrm = true;
		does_write = false;
		break;
	default:
		goto error_unsupported;
	}

	if (has_modrm) {
		pc++;
		page = map_code_page(cpu_data, pg_structs, pc, page);
		if (!page)
			goto error_nopage;
		modrm = *(struct modrm *)&page[pc & PAGE_OFFS_MASK];
		switch (modrm.mod) {
		case 0:
			/* mod=0: only the SIB encoding (rm=4) is supported */
			if (modrm.rm != 4)
				goto error_unsupported;
			pc++;
			page = map_code_page(cpu_data, pg_structs, pc, page);
			if (!page)
				goto error_nopage;
			sib = *(struct sib *)&page[pc & PAGE_OFFS_MASK];
			/* require scale 1, no index, disp32 base */
			if (sib.ss != 0 || sib.index != 4 || sib.reg != 5)
				goto error_unsupported;
			access.inst_len += 5;	/* sib + disp32 */
			break;
		case 2:
			access.inst_len += 4;	/* disp32 */
			break;
		default:
			goto error_unsupported;
		}
		/* map the encoded register field onto the register index
		 * used by the caller; the prefix selects the upper bank */
		if (has_regr)
			access.reg = 7 - modrm.reg;
		else if (modrm.reg == 4)
			goto error_unsupported;
		else
			access.reg = 15 - modrm.reg;
	}

	/* decoded direction must agree with what the hardware reported */
	if (does_write != is_write)
		goto error_inconsistent;

	return access;

error_nopage:
	panic_printk("FATAL: unable to map MMIO instruction page\n");
	goto error;

error_unsupported:
	panic_printk("FATAL: unsupported instruction\n");
	goto error;

error_inconsistent:
	panic_printk("FATAL: inconsistent access, expected %s instruction\n",
		     is_write ? "write" : "read");
error:
	access.inst_len = 0;
	return access;
}
/*
 * Decode the guest instruction at @pc that caused an MMIO exit.
 *
 * Walks the instruction byte by byte, remapping the code page on every step
 * so that instructions crossing a page boundary are handled. Supported
 * forms: an optional REX prefix (REX.X rejected, REX.W over-read) followed
 * by MOV to/from memory (32-bit operand) with mod=0 (no rm=5), mod=1
 * (disp8) or mod=2 (disp32) addressing, including the SIB form for mod=0.
 *
 * @pc:         guest-virtual address of the faulting instruction
 * @pg_structs: guest paging structures used to translate @pc
 * @is_write:   access direction reported by the virtualization support;
 *              must match the decoded instruction
 *
 * Returns the decoded access descriptor; access.inst_len == 0 signals a
 * decoding failure (unmappable page, unsupported instruction, or a
 * direction mismatch), reported via panic_printk.
 */
struct mmio_access mmio_parse(unsigned long pc,
			      const struct guest_paging_structures *pg_structs,
			      bool is_write)
{
	struct mmio_access access = { .inst_len = 0 };
	union opcode op[3] = { };
	bool has_rex_r = false;
	bool does_write;
	u8 *page = NULL;

restart:
	/* remap on every byte - the instruction may cross a page boundary */
	page = map_code_page(pg_structs, pc, page);
	if (!page)
		goto error_nopage;

	op[0].raw = page[pc & PAGE_OFFS_MASK];
	if (op[0].rex.code == X86_REX_CODE) {
		/* REX.W is simply over-read since it only affects the
		 * memory address in our supported modes which we get from
		 * the virtualization support. */
		if (op[0].rex.r)
			has_rex_r = true;
		if (op[0].rex.x)
			goto error_unsupported;
		pc++;
		access.inst_len++;
		goto restart;
	}
	switch (op[0].raw) {
	case X86_OP_MOV_TO_MEM:
		access.inst_len += 2;	/* opcode + modrm */
		access.size = 4;
		does_write = true;
		break;
	case X86_OP_MOV_FROM_MEM:
		access.inst_len += 2;	/* opcode + modrm */
		access.size = 4;
		does_write = false;
		break;
	default:
		goto error_unsupported;
	}

	pc++;
	page = map_code_page(pg_structs, pc, page);
	if (!page)
		goto error_nopage;

	op[1].raw = page[pc & PAGE_OFFS_MASK];
	switch (op[1].modrm.mod) {
	case 0:
		if (op[1].modrm.rm == 5) /* 32-bit displacement */
			goto error_unsupported;
		else if (op[1].modrm.rm != 4) /* no SIB */
			break;
		access.inst_len++;	/* sib byte */
		pc++;
		page = map_code_page(pg_structs, pc, page);
		if (!page)
			goto error_nopage;

		op[2].raw = page[pc & PAGE_OFFS_MASK];
		if (op[2].sib.base == 5)
			access.inst_len += 4;	/* disp32 follows the sib */
		break;
	case 1:
	case 2:
		if (op[1].modrm.rm == 4) /* SIB */
			goto error_unsupported;
		/* mod=1 carries a disp8, mod=2 a disp32 */
		access.inst_len += op[1].modrm.mod == 1 ? 1 : 4;
		break;
	default:
		goto error_unsupported;
	}

	/* map the encoded register field onto the register index used by
	 * the caller; REX.R selects the upper bank */
	if (has_rex_r)
		access.reg = 7 - op[1].modrm.reg;
	else if (op[1].modrm.reg == 4)
		goto error_unsupported;
	else
		access.reg = 15 - op[1].modrm.reg;

	/* decoded direction must agree with what the hardware reported */
	if (does_write != is_write)
		goto error_inconsistent;

	return access;

error_nopage:
	panic_printk("FATAL: unable to map MMIO instruction page\n");
	goto error;

error_unsupported:
	panic_printk("FATAL: unsupported instruction (0x%02x 0x%02x 0x%02x)\n",
		     op[0].raw, op[1].raw, op[2].raw);
	goto error;

error_inconsistent:
	panic_printk("FATAL: inconsistent access, expected %s instruction\n",
		     is_write ? "write" : "read");
error:
	access.inst_len = 0;
	return access;
}