static int mac802154_parse_frame_start(struct sk_buff *skb) { int hlen; struct ieee802154_hdr hdr; hlen = ieee802154_hdr_pull(skb, &hdr); if (hlen < 0) return -EINVAL; skb->mac_len = hlen; pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr.fc), hdr.seq); mac_cb(skb)->flags = hdr.fc.type; if (hdr.fc.ack_request) mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ; if (hdr.fc.security_enabled) mac_cb(skb)->flags |= MAC_CB_FLAG_SECEN; mac802154_print_addr("destination", &hdr.dest); mac802154_print_addr("source", &hdr.source); mac_cb(skb)->source = hdr.source; mac_cb(skb)->dest = hdr.dest; if (hdr.fc.security_enabled) { u64 key; pr_debug("seclevel %i\n", hdr.sec.level); switch (hdr.sec.key_id_mode) { case IEEE802154_SCF_KEY_IMPLICIT: pr_debug("implicit key\n"); break; case IEEE802154_SCF_KEY_INDEX: pr_debug("key %02x\n", hdr.sec.key_id); break; case IEEE802154_SCF_KEY_SHORT_INDEX: pr_debug("key %04x:%04x %02x\n", le32_to_cpu(hdr.sec.short_src) >> 16, le32_to_cpu(hdr.sec.short_src) & 0xffff, hdr.sec.key_id); break; case IEEE802154_SCF_KEY_HW_INDEX: key = swab64((__force u64) hdr.sec.extended_src); pr_debug("key source %8phC %02x\n", &key, hdr.sec.key_id); break; } return -EINVAL; }
/*
 * kvmppc_handle_store - set up in-kernel emulation of a guest MMIO store.
 * @run:   shared kvm_run area whose mmio block is filled in for userspace
 * @vcpu:  the vCPU performing the access
 * @val:   value the guest is storing
 * @bytes: access width (1, 2, 4 or 8)
 * @is_default_endian: true if the access uses the guest's default endianness
 *
 * Returns EMULATE_DONE if the store was completed by an in-kernel device,
 * EMULATE_DO_MMIO if userspace must finish it, or EMULATE_FAIL for a bogus
 * length.
 *
 * Fixes: the bad-length check used to print the *stale* run->mmio.len (it is
 * only assigned further down) and then fell through and kept going; it now
 * reports the requested size and bails out before touching the 8-byte
 * run->mmio.data buffer.
 */
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, bytes);
		return EMULATE_FAIL;
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	/* Try the in-kernel MMIO bus first; only exit to userspace on miss. */
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
void gdb_arch_read_reg(unsigned long regnum, struct cpu_user_regs *regs, struct gdb_context *ctx) { struct gdb_callback_arg arg; unsigned long reg; struct pt_fpreg freg; char buf[16 * 2 + 1]; if (regnum >= NUM_REGS) { dbg_printk("%s: regnum %ld\n", __func__, regnum); goto out_err; } arg.regs = regs; arg.regnum = regnum; arg.reg = ® arg.freg = &freg; arg.error = 0; unw_init_running(&gdb_get_reg_callback, (void*)&arg); if (arg.error < 0) { dbg_printk("%s: gdb_get_reg_callback failed\n", __func__); goto out_err; } if (arg.error > 0) { // notify gdb that this register is not supported. // see fetch_register_using_p() in gdb/remote.c. safe_strcpy(buf, "x"); } else if (IA64_FR0_REGNUM <= regnum && regnum <= IA64_FR0_REGNUM + 127) { snprintf(buf, sizeof(buf), "%.016lx", swab64(freg.u.bits[0])); snprintf(buf + 16, sizeof(buf) - 16, "%.016lx", swab64(freg.u.bits[1])); } else { snprintf(buf, sizeof(buf), "%.016lx", swab64(reg)); } out: return gdb_send_reply(buf, ctx); out_err: dbg_printk("Register read unsupported regnum = 0x%lx\n", regnum); safe_strcpy(buf, "E0"); goto out; }
static int ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr) { int hlen; struct ieee802154_mac_cb *cb = mac_cb_init(skb); skb_reset_mac_header(skb); hlen = ieee802154_hdr_pull(skb, hdr); if (hlen < 0) return -EINVAL; skb->mac_len = hlen; pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc), hdr->seq); cb->type = hdr->fc.type; cb->ackreq = hdr->fc.ack_request; cb->secen = hdr->fc.security_enabled; ieee802154_print_addr("destination", &hdr->dest); ieee802154_print_addr("source", &hdr->source); cb->source = hdr->source; cb->dest = hdr->dest; if (hdr->fc.security_enabled) { u64 key; pr_debug("seclevel %i\n", hdr->sec.level); switch (hdr->sec.key_id_mode) { case IEEE802154_SCF_KEY_IMPLICIT: pr_debug("implicit key\n"); break; case IEEE802154_SCF_KEY_INDEX: pr_debug("key %02x\n", hdr->sec.key_id); break; case IEEE802154_SCF_KEY_SHORT_INDEX: pr_debug("key %04x:%04x %02x\n", le32_to_cpu(hdr->sec.short_src) >> 16, le32_to_cpu(hdr->sec.short_src) & 0xffff, hdr->sec.key_id); break; case IEEE802154_SCF_KEY_HW_INDEX: key = swab64((__force u64)hdr->sec.extended_src); pr_debug("key source %8phC %02x\n", &key, hdr->sec.key_id); break; } }
/*
 * do_mem_mw64 - u-boot "mw"-style command: write a 64-bit-addressed value
 * to one or more consecutive I/O locations.
 *
 * argv[0] carries an optional size/swap suffix, argv[1] the address,
 * argv[2] the value, and the optional argv[3] a repeat count.
 *
 * Returns 0 on success, 1 on a bad size suffix, CMD_RET_USAGE on bad argc.
 */
int do_mem_mw64 ( cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	uint64_t addr, val, count;
	int size;
	int swap;

	if (argc < 3 || argc > 4)
		return CMD_RET_USAGE;

	/* Data size suffix on the command name (defaults to 8 bytes). */
	size = cmd_get_data_size(argv[0], 8);
	if (size < 1)
		return 1;
	swap = cmd_get_data_swap64(argv[0]);

	/* Address is specified since argc > 1. */
	addr = simple_strtoull(argv[1], NULL, 16);
	addr |= base_address64;

	/* The value to write. */
	val = simple_strtoull(argv[2], NULL, 16);

	/* Optional repeat count; default is a single write. */
	count = (argc == 4) ? simple_strtoul(argv[3], NULL, 16) : 1;

	while (count-- > 0) {
		switch (size) {
		case 8:
			/* NOTE(review): the swap is re-applied on every
			 * iteration, so with count > 1 the byte order
			 * alternates between writes.  Preserved from the
			 * original — confirm whether that is intended. */
			if (swap)
				val = swab64(val);
			cvmx_write_csr(addr, val);
			break;
		case 4:
			if (swap)
				val = swab32(val);
			cvmx_write64_uint32(addr, val);
			break;
		case 2:
			if (swap)
				val = swab16(val);
			cvmx_write64_uint16(addr, val);
			break;
		default:
			cvmx_write64_uint8(addr, val);
			break;
		}
		addr += size;
	}

	return 0;
}
static void ieee802154_print_addr(const char *name, const struct ieee802154_addr *addr) { if (addr->mode == IEEE802154_ADDR_NONE) pr_debug("%s not present\n", name); pr_debug("%s PAN ID: %04x\n", name, le16_to_cpu(addr->pan_id)); if (addr->mode == IEEE802154_ADDR_SHORT) { pr_debug("%s is short: %04x\n", name, le16_to_cpu(addr->short_addr)); } else { u64 hw = swab64((__force u64)addr->extended_addr); pr_debug("%s is hardware: %8phC\n", name, &hw); } }
/*
 * kvmppc_swab_shared - byte-swap every field of the magic-page shared area
 * in place, used when the guest and host disagree on endianness.
 */
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *s = vcpu->arch.shared;
	int n;

	/* 64-bit fields. */
	s->sprg0 = swab64(s->sprg0);
	s->sprg1 = swab64(s->sprg1);
	s->sprg2 = swab64(s->sprg2);
	s->sprg3 = swab64(s->sprg3);
	s->srr0 = swab64(s->srr0);
	s->srr1 = swab64(s->srr1);
	s->dar = swab64(s->dar);
	s->msr = swab64(s->msr);

	/* 32-bit fields. */
	s->dsisr = swab32(s->dsisr);
	s->int_pending = swab32(s->int_pending);
	for (n = 0; n < ARRAY_SIZE(s->sr); n++)
		s->sr[n] = swab32(s->sr[n]);
}
/*
 * find_uberblocks - scan a ZFS vdev label for valid uberblocks.
 * @label:       mapped label data
 * @ub_offset:   out: offset (within @label) of the last uberblock found
 * @swap_endian: out: 1 if that uberblock is opposite-endian to the host
 *
 * Returns the number of uberblocks found (0 if none).
 *
 * Fix: the original was truncated — the for-loop close, the "return found;"
 * and the function's closing brace were missing, leaving it syntactically
 * broken and without a return value.
 */
static int find_uberblocks(const void *label, loff_t *ub_offset,
			   int *swap_endian)
{
	uint64_t swab_magic = swab64(UBERBLOCK_MAGIC);
	struct zfs_uberblock *ub;
	int i, found = 0;
	loff_t offset = VDEV_LABEL_UBERBLOCK;

	for (i = 0; i < UBERBLOCKS_COUNT; i++, offset += UBERBLOCK_SIZE) {
		ub = (struct zfs_uberblock *)(label + offset);

		/* Native-endian magic: pool written by a same-endian host. */
		if (ub->ub_magic == UBERBLOCK_MAGIC) {
			*ub_offset = offset;
			*swap_endian = 0;
			found++;
			zdebug("probe_zfs: found little-endian uberblock at %llu\n", offset >> 10);
		}

		/* Byte-swapped magic: pool written by an opposite-endian host. */
		if (ub->ub_magic == swab_magic) {
			*ub_offset = offset;
			*swap_endian = 1;
			found++;
			zdebug("probe_zfs: found big-endian uberblock at %llu\n", offset >> 10);
		}
	}

	return found;
}
/*
 * nla_put_hwaddr - append a little-endian EUI-64 hardware address to a
 * netlink message as a byte-swapped (big-endian order) u64 attribute.
 * Returns whatever nla_put_u64() returns (0 or -EMSGSIZE).
 */
static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr)
{
	u64 mac = swab64((__force u64)hwaddr);

	return nla_put_u64(msg, type, mac);
}
/*
 * Self-test for the swab16/swab32/swab64 implementations.
 *
 * Each aryNN table holds pairs (value, byte-swapped value) terminated by a
 * zero entry; every pair is checked in both directions.  Returns the number
 * of mismatches (0 on success).
 */
int main(int argc, char **argv)
{
	int idx;
	int errors = 0;

	printf("Testing swab16\n");
	idx = 0;
	do {
		uint16_t got16 = swab16(ary16[idx]);
		uint16_t back16 = swab16(ary16[idx + 1]);

		printf("swab16(0x%04"PRIx16") = 0x%04"PRIx16"\n",
		       ary16[idx], got16);
		if (got16 != ary16[idx + 1]) {
			printf("Error!!! %04"PRIx16" != %04"PRIx16"\n",
			       got16, ary16[idx + 1]);
			errors++;
		}
		/* Swapping must be its own inverse. */
		if (back16 != ary16[idx]) {
			printf("Error!!! %04"PRIx16" != %04"PRIx16"\n",
			       back16, ary16[idx]);
			errors++;
		}
		idx += 2;
	} while (ary16[idx] != 0);

	printf("Testing swab32\n");
	idx = 0;
	do {
		uint32_t got32 = swab32(ary32[idx]);
		uint32_t back32 = swab32(ary32[idx + 1]);

		printf("swab32(0x%08"PRIx32") = 0x%08"PRIx32"\n",
		       ary32[idx], got32);
		if (got32 != ary32[idx + 1]) {
			printf("Error!!! %04"PRIx32" != %04"PRIx32"\n",
			       got32, ary32[idx + 1]);
			errors++;
		}
		if (back32 != ary32[idx]) {
			printf("Error!!! %04"PRIx32" != %04"PRIx32"\n",
			       back32, ary32[idx]);
			errors++;
		}
		idx += 2;
	} while (ary32[idx] != 0);

	printf("Testing swab64\n");
	idx = 0;
	do {
		uint64_t got64 = swab64(ary64[idx]);
		uint64_t back64 = swab64(ary64[idx + 1]);

		printf("swab64(0x%016"PRIx64") = 0x%016"PRIx64"\n",
		       ary64[idx], got64);
		if (got64 != ary64[idx + 1]) {
			printf("Error!!! %016"PRIx64" != %016"PRIx64"\n",
			       got64, ary64[idx + 1]);
			errors++;
		}
		if (back64 != ary64[idx]) {
			printf("Error!!! %016"PRIx64" != %016"PRIx64"\n",
			       back64, ary64[idx]);
			errors++;
		}
		idx += 2;
	} while (ary64[idx] != 0);

	if (!errors)
		printf("No errors found in the byteswap implementation\n");

	return errors;
}
/*
 * print_buffer64 - hex+ASCII dump of 64-bit-addressed I/O space.
 * @addr:    address printed at the start of each line
 * @data:    I/O address the values are read from
 * @width:   element width in bytes (8, 4, 2, anything else = 1)
 * @count:   number of elements to dump
 * @swap:    byte-swap each element before printing (not for the ASCII part)
 * @linelen: elements per output line (clamped/defaulted as needed)
 *
 * Returns 0 when done, -1 if interrupted by ctrl-c.
 */
int print_buffer64(uint64_t addr, uint64_t data, uint width, uint count,
		   int swap, uint linelen)
{
	/* A union gives the buffer proper alignment for every width. */
	union linebuf {
		uint64_t ud[MAX_LINE_LENGTH_BYTES/sizeof(uint64_t) + 1];
		uint32_t ui[MAX_LINE_LENGTH_BYTES/sizeof(uint32_t) + 1];
		uint16_t us[MAX_LINE_LENGTH_BYTES/sizeof(uint16_t) + 1];
		uint8_t uc[MAX_LINE_LENGTH_BYTES/sizeof(uint8_t) + 1];
	} line;
	int col;

	/* Clamp the line length to the buffer; fall back to the default. */
	if (linelen * width > MAX_LINE_LENGTH_BYTES)
		linelen = MAX_LINE_LENGTH_BYTES / width;
	if (linelen < 1)
		linelen = DEFAULT_LINE_LENGTH_BYTES / width;

	while (count) {
		printf("%016llx:", addr);

		/* Last line may be short. */
		if (count < linelen)
			linelen = count;

		/* Read into the line buffer and print the hex values.
		 * The unswapped value is kept in the buffer so the ASCII
		 * column reflects the raw bytes. */
		for (col = 0; col < linelen; col++) {
			uint64_t x;

			switch (width) {
			case 8:
				x = line.ud[col] = cvmx_read_csr(data);
				if (swap)
					x = swab64(x);
				break;
			case 4:
				x = line.ui[col] = cvmx_read64_uint32(data);
				if (swap)
					x = swab32(x);
				break;
			case 2:
				x = line.us[col] = cvmx_read64_uint16(data);
				if (swap)
					x = swab16(x);
				break;
			default:
				x = line.uc[col] = cvmx_read64_int8(data);
				break;
			}
			printf(" %0*llx", width * 2, x);
			data += width;
		}

		/* Replace non-printable bytes, then emit the ASCII column. */
		for (col = 0; col < linelen * width; col++) {
			if (!isprint(line.uc[col]) || line.uc[col] >= 0x80)
				line.uc[col] = '.';
		}
		line.uc[col] = '\0';
		printf(" %s\n", line.uc);

		addr += linelen * width;
		count -= linelen;

		if (ctrlc())
			return -1;
	}

	return 0;
}
void gdb_arch_read_reg(unsigned long regnum, struct cpu_user_regs *regs, struct gdb_context *ctx) { unsigned long reg = IA64_IP_REGNUM; char buf[9]; int i; dbg_printk("Register read regnum = 0x%lx\n", regnum); if (IA64_GR0_REGNUM <= regnum && regnum <= IA64_GR0_REGNUM + 31) { for (i = 0; i < gr_reg_to_cpu_user_regs_index_max; i++) { if (gr_reg_to_cpu_user_regs_index[i].reg == regnum) { reg = *(unsigned long*)(((char*)regs) + gr_reg_to_cpu_user_regs_index[i].ptregoff); break; } } if (i == gr_reg_to_cpu_user_regs_index_max) { goto out_err; } } else if (IA64_BR0_REGNUM <= regnum && regnum <= IA64_BR0_REGNUM + 7) { for (i = 0; i < br_reg_to_cpu_user_regs_index_max; i++) { if (br_reg_to_cpu_user_regs_index[i].reg == regnum) { reg = *(unsigned long*)(((char*)regs) + br_reg_to_cpu_user_regs_index[i].ptregoff); break; } } if (i == br_reg_to_cpu_user_regs_index_max) { goto out_err; } } else if (IA64_FR0_REGNUM + 6 <= regnum && regnum <= IA64_FR0_REGNUM + 11) { for (i = 0; i < fr_reg_to_cpu_user_regs_index_max; i++) { if (fr_reg_to_cpu_user_regs_index[i].reg == regnum) { reg = *(unsigned long*)(((char*)regs) + fr_reg_to_cpu_user_regs_index[i].ptregoff); break; } } if (i == fr_reg_to_cpu_user_regs_index_max) { goto out_err; } } else if (regnum == IA64_CSD_REGNUM) { reg = regs->ar_csd; } else if (regnum == IA64_SSD_REGNUM) { reg = regs->ar_ssd; } else if (regnum == IA64_PSR_REGNUM) { reg = regs->cr_ipsr; } else if (regnum == IA64_IP_REGNUM) { reg = regs->cr_iip; } else if (regnum == IA64_CFM_REGNUM) { reg = regs->cr_ifs; } else if (regnum == IA64_UNAT_REGNUM) { reg = regs->ar_unat; } else if (regnum == IA64_PFS_REGNUM) { reg = regs->ar_pfs; } else if (regnum == IA64_RSC_REGNUM) { reg = regs->ar_rsc; } else if (regnum == IA64_RNAT_REGNUM) { reg = regs->ar_rnat; } else if (regnum == IA64_BSPSTORE_REGNUM) { reg = regs->ar_bspstore; } else if (regnum == IA64_PR_REGNUM) { reg = regs->pr; } else if (regnum == IA64_FPSR_REGNUM) { reg = regs->ar_fpsr; } else if (regnum == 
IA64_CCV_REGNUM) { reg = regs->ar_ccv; } else { // emul_unat, rfi_pfs goto out_err; } dbg_printk("Register read regnum = 0x%lx, val = 0x%lx\n", regnum, reg); snprintf(buf, sizeof(buf), "%.08lx", swab64(reg)); out: return gdb_send_reply(buf, ctx); out_err: dbg_printk("Register read unsupported regnum = 0x%lx\n", regnum); safe_strcpy(buf, "x"); goto out; }
/*
 * jit_bundle_gen - handle an unaligned-access fault for one tilegx
 * instruction bundle.
 *
 * The faulting bundle is decoded (X or Y mode, selected by
 * TILEGX_BUNDLE_MODE_MASK) to classify the memory operation: load vs.
 * store (load_n_store), access size (load_store_size: 1/2/4/8),
 * signedness (load_store_signed), an optional post-increment
 * (x1_add/x1_add_imm8), and any branch/link operation sharing the bundle
 * (y1_br/y1_lr).  find_regs() extracts the operand registers (rd, ra, rb)
 * plus scratch candidates (clob1..3) and whether operands alias.  bundle_2
 * is the original bundle with the memory op replaced by a dummy/fnop so
 * the remaining operations can still be issued; bundle_2_enable is cleared
 * when nothing useful remains.
 *
 * For faults taken at kernel privilege (EX1_PL(regs->ex1) != USER_PL) only
 * the get_user/put_user fixup pattern {ld/st ...; movei rx, 0} is
 * accepted: the access is re-done byte-wise via get_user()/put_user()
 * (byte-swapping the source value on big-endian for the store path, and
 * sign-extending 4-byte loads), rx receives the error code, the pc is
 * advanced one bundle, and the handler returns.  Anything unexpected in
 * kernel mode is a fatal oops.
 *
 * NOTE(review): this snippet appears truncated relative to the full
 * upstream function — the user-space JIT-fragment generation path
 * (jit_code_area, frag, bundle_2, align_ctl and the n/k counters are set
 * up for it) is not visible here; confirm against the original
 * arch/tile/kernel/unaligned.c before relying on the tail of this code.
 */
static void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, int align_ctl) { struct thread_info *info = current_thread_info(); struct unaligned_jit_fragment frag; struct unaligned_jit_fragment *jit_code_area; tilegx_bundle_bits bundle_2 = 0; /* If bundle_2_enable = false, bundle_2 is fnop/nop operation. */ bool bundle_2_enable = true; uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1; /* * Indicate if the unalign access * instruction's registers hit with * others in the same bundle. */ bool alias = false; bool load_n_store = true; bool load_store_signed = false; unsigned int load_store_size = 8; bool y1_br = false; /* True, for a branch in same bundle at Y1.*/ int y1_br_reg = 0; /* True for link operation. i.e. jalr or lnk at Y1 */ bool y1_lr = false; int y1_lr_reg = 0; bool x1_add = false;/* True, for load/store ADD instruction at X1*/ int x1_add_imm8 = 0; bool unexpected = false; int n = 0, k; jit_code_area = (struct unaligned_jit_fragment *)(info->unalign_jit_base); memset((void *)&frag, 0, sizeof(frag)); /* 0: X mode, Otherwise: Y mode. */ if (bundle & TILEGX_BUNDLE_MODE_MASK) { unsigned int mod, opcode; if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 && get_RRROpcodeExtension_Y1(bundle) == UNARY_RRR_1_OPCODE_Y1) { opcode = get_UnaryOpcodeExtension_Y1(bundle); /* * Test "jalr", "jalrp", "jr", "jrp" instruction at Y1 * pipeline. */ switch (opcode) { case JALR_UNARY_OPCODE_Y1: case JALRP_UNARY_OPCODE_Y1: y1_lr = true; y1_lr_reg = 55; /* Link register. */ /* FALLTHROUGH */ case JR_UNARY_OPCODE_Y1: case JRP_UNARY_OPCODE_Y1: y1_br = true; y1_br_reg = get_SrcA_Y1(bundle); break; case LNK_UNARY_OPCODE_Y1: /* "lnk" at Y1 pipeline. */ y1_lr = true; y1_lr_reg = get_Dest_Y1(bundle); break; } } opcode = get_Opcode_Y2(bundle); mod = get_Mode(bundle); /* * bundle_2 is bundle after making Y2 as a dummy operation * - ld zero, sp */ bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy(); /* Make Y1 as fnop if Y1 is a branch or lnk operation. 
*/ if (y1_br || y1_lr) { bundle_2 &= ~(GX_INSN_Y1_MASK); bundle_2 |= jit_y1_fnop(); } if (is_y0_y1_nop(bundle_2)) bundle_2_enable = false; if (mod == MODE_OPCODE_YC2) { /* Store. */ load_n_store = false; load_store_size = 1 << opcode; load_store_signed = false; find_regs(bundle, 0, &ra, &rb, &clob1, &clob2, &clob3, &alias); if (load_store_size > 8) unexpected = true; } else { /* Load. */ load_n_store = true; if (mod == MODE_OPCODE_YB2) { switch (opcode) { case LD_OPCODE_Y2: load_store_signed = false; load_store_size = 8; break; case LD4S_OPCODE_Y2: load_store_signed = true; load_store_size = 4; break; case LD4U_OPCODE_Y2: load_store_signed = false; load_store_size = 4; break; default: unexpected = true; } } else if (mod == MODE_OPCODE_YA2) { if (opcode == LD2S_OPCODE_Y2) { load_store_signed = true; load_store_size = 2; } else if (opcode == LD2U_OPCODE_Y2) { load_store_signed = false; load_store_size = 2; } else unexpected = true; } else unexpected = true; find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2, &clob3, &alias); } } else { unsigned int opcode; /* bundle_2 is bundle after making X1 as "fnop". 
*/ bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop(); if (is_x0_x1_nop(bundle_2)) bundle_2_enable = false; if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) { opcode = get_UnaryOpcodeExtension_X1(bundle); if (get_RRROpcodeExtension_X1(bundle) == UNARY_RRR_0_OPCODE_X1) { load_n_store = true; find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2, &clob3, &alias); switch (opcode) { case LD_UNARY_OPCODE_X1: load_store_signed = false; load_store_size = 8; break; case LD4S_UNARY_OPCODE_X1: load_store_signed = true; /* FALLTHROUGH */ case LD4U_UNARY_OPCODE_X1: load_store_size = 4; break; case LD2S_UNARY_OPCODE_X1: load_store_signed = true; /* FALLTHROUGH */ case LD2U_UNARY_OPCODE_X1: load_store_size = 2; break; default: unexpected = true; } } else { load_n_store = false; load_store_signed = false; find_regs(bundle, 0, &ra, &rb, &clob1, &clob2, &clob3, &alias); opcode = get_RRROpcodeExtension_X1(bundle); switch (opcode) { case ST_RRR_0_OPCODE_X1: load_store_size = 8; break; case ST4_RRR_0_OPCODE_X1: load_store_size = 4; break; case ST2_RRR_0_OPCODE_X1: load_store_size = 2; break; default: unexpected = true; } } } else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) { load_n_store = true; opcode = get_Imm8OpcodeExtension_X1(bundle); switch (opcode) { case LD_ADD_IMM8_OPCODE_X1: load_store_size = 8; break; case LD4S_ADD_IMM8_OPCODE_X1: load_store_signed = true; /* FALLTHROUGH */ case LD4U_ADD_IMM8_OPCODE_X1: load_store_size = 4; break; case LD2S_ADD_IMM8_OPCODE_X1: load_store_signed = true; /* FALLTHROUGH */ case LD2U_ADD_IMM8_OPCODE_X1: load_store_size = 2; break; case ST_ADD_IMM8_OPCODE_X1: load_n_store = false; load_store_size = 8; break; case ST4_ADD_IMM8_OPCODE_X1: load_n_store = false; load_store_size = 4; break; case ST2_ADD_IMM8_OPCODE_X1: load_n_store = false; load_store_size = 2; break; default: unexpected = true; } if (!unexpected) { x1_add = true; if (load_n_store) x1_add_imm8 = get_Imm8_X1(bundle); else x1_add_imm8 = get_Dest_Imm8_X1(bundle); } find_regs(bundle, 
load_n_store ? (&rd) : NULL, &ra, &rb, &clob1, &clob2, &clob3, &alias); } else unexpected = true; } /* * Some sanity check for register numbers extracted from fault bundle. */ if (check_regs(rd, ra, rb, clob1, clob2, clob3) == true) unexpected = true; /* Give warning if register ra has an aligned address. */ if (!unexpected) WARN_ON(!((load_store_size - 1) & (regs->regs[ra]))); /* * Fault came from kernel space, here we only need take care of * unaligned "get_user/put_user" macros defined in "uaccess.h". * Basically, we will handle bundle like this: * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0} * (Refer to file "arch/tile/include/asm/uaccess.h" for details). * For either load or store, byte-wise operation is performed by calling * get_user() or put_user(). If the macro returns non-zero value, * set the value to rx, otherwise set zero to rx. Finally make pc point * to next bundle and return. */ if (EX1_PL(regs->ex1) != USER_PL) { unsigned long rx = 0; unsigned long x = 0, ret = 0; if (y1_br || y1_lr || x1_add || (load_store_signed != (load_n_store && load_store_size == 4))) { /* No branch, link, wrong sign-ext or load/store add. */ unexpected = true; } else if (!unexpected) { if (bundle & TILEGX_BUNDLE_MODE_MASK) { /* * Fault bundle is Y mode. * Check if the Y1 and Y0 is the form of * { movei rx, 0; nop/fnop }, if yes, * find the rx. */ if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1) && (get_SrcA_Y1(bundle) == TREG_ZERO) && (get_Imm8_Y1(bundle) == 0) && is_bundle_y0_nop(bundle)) { rx = get_Dest_Y1(bundle); } else if ((get_Opcode_Y0(bundle) == ADDI_OPCODE_Y0) && (get_SrcA_Y0(bundle) == TREG_ZERO) && (get_Imm8_Y0(bundle) == 0) && is_bundle_y1_nop(bundle)) { rx = get_Dest_Y0(bundle); } else { unexpected = true; } } else { /* * Fault bundle is X mode. * Check if the X0 is 'movei rx, 0', * if yes, find the rx. 
*/ if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0) && (get_Imm8OpcodeExtension_X0(bundle) == ADDI_IMM8_OPCODE_X0) && (get_SrcA_X0(bundle) == TREG_ZERO) && (get_Imm8_X0(bundle) == 0)) { rx = get_Dest_X0(bundle); } else { unexpected = true; } } /* rx should be less than 56. */ if (!unexpected && (rx >= 56)) unexpected = true; } if (!search_exception_tables(regs->pc)) { /* No fixup in the exception tables for the pc. */ unexpected = true; } if (unexpected) { /* Unexpected unalign kernel fault. */ struct task_struct *tsk = validate_current(); bust_spinlocks(1); show_regs(regs); if (unlikely(tsk->pid < 2)) { panic("Kernel unalign fault running %s!", tsk->pid ? "init" : "the idle task"); } #ifdef SUPPORT_DIE die("Oops", regs); #endif bust_spinlocks(1); do_group_exit(SIGKILL); } else { unsigned long i, b = 0; unsigned char *ptr = (unsigned char *)regs->regs[ra]; if (load_n_store) { /* handle get_user(x, ptr) */ for (i = 0; i < load_store_size; i++) { ret = get_user(b, ptr++); if (!ret) { /* Success! update x. */ #ifdef __LITTLE_ENDIAN x |= (b << (8 * i)); #else x <<= 8; x |= b; #endif /* __LITTLE_ENDIAN */ } else { x = 0; break; } } /* Sign-extend 4-byte loads. */ if (load_store_size == 4) x = (long)(int)x; /* Set register rd. */ regs->regs[rd] = x; /* Set register rx. */ regs->regs[rx] = ret; /* Bump pc. */ regs->pc += 8; } else { /* Handle put_user(x, ptr) */ x = regs->regs[rb]; #ifdef __LITTLE_ENDIAN b = x; #else /* * Swap x in order to store x from low * to high memory same as the * little-endian case. */ switch (load_store_size) { case 8: b = swab64(x); break; case 4: b = swab32(x); break; case 2: b = swab16(x); break; } #endif /* __LITTLE_ENDIAN */ for (i = 0; i < load_store_size; i++) { ret = put_user(b, ptr++); if (ret) break; /* Success! shift 1 byte. */ b >>= 8; } /* Set register rx. */ regs->regs[rx] = ret; /* Bump pc. 
*/ regs->pc += 8; } } unaligned_fixup_count++; if (unaligned_printk) { pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n", current->comm, current->pid, regs->regs[ra]); } /* Done! Return to the exception handler. */ return; }
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) { u64 uninitialized_var(gpr); if (run->mmio.len > sizeof(gpr)) { printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); return; } if (!vcpu->arch.mmio_host_swabbed) { switch (run->mmio.len) { case 8: gpr = *(u64 *)run->mmio.data; break; case 4: gpr = *(u32 *)run->mmio.data; break; case 2: gpr = *(u16 *)run->mmio.data; break; case 1: gpr = *(u8 *)run->mmio.data; break; } } else { switch (run->mmio.len) { case 8: gpr = swab64(*(u64 *)run->mmio.data); break; case 4: gpr = swab32(*(u32 *)run->mmio.data); break; case 2: gpr = swab16(*(u16 *)run->mmio.data); break; case 1: gpr = *(u8 *)run->mmio.data; break; } } if (vcpu->arch.mmio_sign_extend) { switch (run->mmio.len) { #ifdef CONFIG_PPC64 case 4: gpr = (s64)(s32)gpr; break; #endif case 2: gpr = (s64)(s16)gpr; break; case 1: gpr = (s64)(s8)gpr; break; } } kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { case KVM_MMIO_REG_GPR: kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); break; case KVM_MMIO_REG_FPR: VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; break; #ifdef CONFIG_PPC_BOOK3S case KVM_MMIO_REG_QPR: vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; case KVM_MMIO_REG_FQPR: VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #endif default: BUG(); } }