/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (filter[i].code) {
			/*** ALU ops ***/
		case BPF_S_ALU_ADD_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_ADD_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_S_ALU_SUB_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_SUB_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_S_ALU_MUL_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_MUL_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_DIV_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
			PPC_LI32(r_scratch1, K);
			/* Top 32 bits of 64bit result -> A */
			PPC_MULHWU(r_A, r_A, r_scratch1);
			break;
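			/*
			 * Aside (editor's note, not in the original source):
			 * by the time the JIT sees BPF_S_ALU_DIV_K,
			 * sk_chk_filter() has already replaced the constant
			 * divisor with reciprocal_value(K), so the division
			 * reduces to taking the high 32 bits of a 32x32
			 * multiply -- exactly the single PPC_MULHWU above.
			 */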
		case BPF_S_ALU_AND_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_AND_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_OR_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_OR_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ALU_LSH_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_LSH_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_RSH_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_RSH_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_S_RET_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up.  Otherwise,
			 * if there's nothing to tidy, just return.  If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.  Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine
				 * the code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_RET_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_MISC_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_S_MISC_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_S_LD_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_S_LDX_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_S_LD_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LDX_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
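			/*
			 * Aside (editor's note, not in the original source):
			 * the four mem[] cases above never touch the stack.
			 * Scratch slot K lives in GPR r_M + (K & 0xf), and the
			 * per-slot bit OR'd into ctx->seen lets the prologue
			 * and epilogue save/restore only the nonvolatile
			 * registers a given filter actually uses.
			 */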
		case BPF_S_LD_W_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_S_LDX_W_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/

		/* None of the BPF_S_ANC* codes appear to be passed by
		 * sk_chk_filter().  The interpreter and the x86 BPF
		 * compiler implement them so we do too -- they may be
		 * planted in future.
		 */
		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  protocol));
			/* ntohs is a NOP with BE loads. */
			break;
		case BPF_S_ANC_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_S_ANC_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_S_ANC_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  rxhash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  rxhash));
			break;
		case BPF_S_ANC_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_S_LD_W_ABS:
			func = sk_load_word;
			goto common_load;
		case BPF_S_LD_H_ABS:
			func = sk_load_half;
			goto common_load;
		case BPF_S_LD_B_ABS:
			func = sk_load_byte;
		common_load:
			/*
			 * Load from [K].  Reference with the (negative)
			 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
			 */
			ctx->seen |= SEEN_DATAREF;
			if ((int)K < 0)
				return -ENOTSUPP;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_S_LD_W_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_S_LD_H_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_S_LD_B_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions, and result in a 'ret 0'.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_S_LDX_B_MSH:
			/*
			 * x86 version drops packet (RET 0) when K<0, whereas
			 * interpreter does allow K<0 (__load_pointer, special
			 * ancillary data).  common_load returns ENOTSUPP if
			 * K<0, so we fall back to interpreter & filter works.
			 */
			func = sk_load_byte_msh;
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_S_JMP_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_S_JMP_JSET_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
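/*
 * Context sketch (editor's note, not in the original source):
 * bpf_jit_build_body() is written to run more than once.  A first "faux"
 * pass with image == NULL only advances ctx->idx, fills in addrs[] and
 * accumulates ctx->seen; later passes emit real instructions once every
 * branch target in addrs[] is known.  Roughly, a driver would look like
 * the hypothetical fragment below (names other than the three build
 * functions, and the allocation details, are illustrative only):
 */
#if 0	/* illustrative sketch, not part of this file */
	/* Pass 0: no image -- just learn sizes, addrs[] and cgctx.seen. */
	if (bpf_jit_build_body(fp, NULL, &cgctx, addrs))
		goto out;	/* unsupported opcode: leave fp interpreted */

	bpf_jit_build_prologue(fp, NULL, &cgctx);	/* sizing only */
	bpf_jit_build_epilogue(NULL, &cgctx);

	image = module_alloc(cgctx.idx * 4 + FUNCTION_DESCR_SIZE);
	if (!image)
		goto out;

	/* Emission pass: every address in addrs[] is now stable. */
	cgctx.idx = 0;
	bpf_jit_build_prologue(fp, code_base, &cgctx);
	bpf_jit_build_body(fp, code_base, &cgctx, addrs);
	bpf_jit_build_epilogue(code_base, &cgctx);
#endif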
 * up at all.  I'd be curious if other chips differed.
 */

#if __MWERKS__ < 0x800

#include "ppcasm.h"	/* PowerPC assembler */

/*
 * MulN1 expects (*out, *in, len, k), len >= 1
 *                 r3    r4   r5  r6
 */
static const unsigned mulN1[] = {
	PPC_LWZ(7,4,0),		/* Load first word of in into r7 */
	PPC_MULLW(8,7,6),	/* Low half of multiply in r8 */
	PPC_MTCTR(5),		/* Move len into CTR */
	PPC_ADDIC(0,0,0),	/* Clear carry bit for loop */
	PPC_MULHWU(5,7,6),	/* High half of multiply in r5 */
	PPC_STW(8,3,0),		/* out[0] = r8 */
	PPC_BC(18,31,7),	/* Branch to Label if --ctr == 0 */
/* Loop: */
	PPC_LWZU(7,4,4),	/* r7 = *++in */
	PPC_MULLW(8,7,6),	/* r8 = low word of product */
	PPC_ADDE(8,8,5),	/* Add carry word r5 and carry bit CF to r8 */
	PPC_STWU(8,3,4),	/* *++out = r8 */
	PPC_MULHWU(5,7,6),	/* r5 is high word of product, for carry word */
	PPC_BC(16,31,-5),	/* Branch to Loop if --ctr != 0 */
/* Label: */
	PPC_ADDZE(5,5),		/* Add carry flag to r5 */
	PPC_STW(5,3,4),		/* out[1] = r5, i.e. out[len] overall */
	PPC_BLR()
};
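/*
 * Reference (editor's note, not in the original file): the instruction
 * words above form a bignum-by-word multiply.  Assuming 32-bit words, the
 * CTR loop and the r5 carry word compute the equivalent of:
 */
#if 0	/* illustrative sketch only */
static void mulN1_ref(unsigned *out, unsigned const *in, unsigned len,
		      unsigned k)
{
	unsigned long long t = 0;	/* carry word lives in r5 above */
	unsigned i;

	for (i = 0; i < len; i++) {
		t += (unsigned long long)in[i] * k;
		out[i] = (unsigned)t;	/* low word, as MULLW/STWU */
		t >>= 32;		/* high word, as MULHWU/ADDE */
	}
	out[len] = (unsigned)t;		/* final carry word: ADDZE/STW */
}
#endif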