Example #1
static void
decode_bpf_code(uint16_t code)
{
	uint16_t i = code & ~BPF_CLASS(code);

	printxval(bpf_class, BPF_CLASS(code), "BPF_???");
	switch (BPF_CLASS(code)) {
		case BPF_LD:
		case BPF_LDX:
			tprints(" | ");
			printxval(bpf_size, BPF_SIZE(code), "BPF_???");
			tprints(" | ");
			printxval(bpf_mode, BPF_MODE(code), "BPF_???");
			break;
		case BPF_ST:
		case BPF_STX:
			if (i)
				tprintf(" | %#x /* %s */", i, "BPF_???");
			break;
		case BPF_ALU:
			tprints(" | ");
			printxval(bpf_src, BPF_SRC(code), "BPF_???");
			tprints(" | ");
			printxval(bpf_op_alu, BPF_OP(code), "BPF_???");
			break;
		case BPF_JMP:
			tprints(" | ");
			printxval(bpf_src, BPF_SRC(code), "BPF_???");
			tprints(" | ");
			printxval(bpf_op_jmp, BPF_OP(code), "BPF_???");
			break;
		case BPF_RET:
			tprints(" | ");
			printxval(bpf_rval, BPF_RVAL(code), "BPF_???");
			i &= ~BPF_RVAL(code);
			if (i)
				tprintf(" | %#x /* %s */", i, "BPF_???");
			break;
		case BPF_MISC:
			tprints(" | ");
			printxval(bpf_miscop, BPF_MISCOP(code), "BPF_???");
			i &= ~BPF_MISCOP(code);
			if (i)
				tprintf(" | %#x /* %s */", i, "BPF_???");
			break;
	}

}
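
The decoder above takes a classic BPF opcode apart by its class, size, mode, source and operation bit fields. As a standalone illustration (not part of strace), the field-extraction macros it relies on are the ones exported by <linux/bpf_common.h>; a minimal sketch, assuming a Linux system where that header is available:

#include <stdio.h>
#include <linux/bpf_common.h>	/* BPF_CLASS, BPF_SIZE, BPF_MODE, BPF_LD, ... */

int main(void)
{
	/* Classic BPF "ld [12]": load a 32-bit word from an absolute offset. */
	unsigned short code = BPF_LD | BPF_W | BPF_ABS;

	/* Prints: class=0 size=0 mode=0x20 */
	printf("class=%#x size=%#x mode=%#x\n",
	       BPF_CLASS(code), BPF_SIZE(code), BPF_MODE(code));
	return 0;
}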
Example #2
static void
emit_shift(struct bpf_jit_state *st, uint32_t op, uint32_t dreg)
{
	uint8_t mod;
	uint32_t bop, opx;

	static const uint8_t ops[] = {0xC1, 0xD3};
	static const uint8_t mods[] = {
		[GET_BPF_OP(BPF_LSH)] = 4,
		[GET_BPF_OP(BPF_RSH)] = 5,
		[GET_BPF_OP(EBPF_ARSH)] = 7,
	};

	bop = GET_BPF_OP(op);
	mod = mods[bop];
	opx = (BPF_SRC(op) == BPF_X);

	emit_rex(st, op, 0, dreg);
	emit_bytes(st, &ops[opx], sizeof(ops[opx]));
	emit_modregrm(st, MOD_DIRECT, mod, dreg);
}
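
Here ops[] selects between the two x86 shift encodings (0xC1 /r with an 8-bit immediate versus 0xD3 /r, which shifts by %cl), and mods[] supplies the ModRM reg field: /4 for shl, /5 for shr, /7 for sar. A hand-assembled sketch (independent of the DPDK JIT helpers above) of the full instruction bytes this scheme yields for a 64-bit shl rdi, 3; the trailing immediate byte is presumably appended by the caller, since emit_shift itself stops at the ModRM byte:

#include <stdio.h>

int main(void)
{
	/* shl rdi, 3: REX.W prefix (0x48), opcode 0xC1, ModRM, imm8.
	 * ModRM = 0xC0 (register-direct) | (4 << 3) (/4 = shl) | 7 (rm = rdi). */
	const unsigned char insn[] = { 0x48, 0xC1, 0xC0 | (4 << 3) | 7, 0x03 };

	for (size_t n = 0; n < sizeof(insn); n++)
		printf("%02x ", insn[n]);	/* 48 c1 e7 03 */
	printf("\n");
	return 0;
}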
Example #3
/*
 * Return the register number that is used by s.  If A and X are both
 * used, return AX_ATOM.  If no register is used, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(struct stmt *s)
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}
Example #4
/*
 * note that rax:rdx are implicitly used as source/destination registers,
 * so some reg spillage is necessary.
 * emit:
 * mov %rax, %r11
 * mov %rdx, %r10
 * mov %<dreg>, %rax
 * either:
 *   mov %<sreg>, %rdx
 * OR
 *   mov <imm>, %rdx
 * mul %rdx
 * mov %r10, %rdx
 * mov %rax, %<dreg>
 * mov %r11, %rax
 */
static void
emit_mul(struct bpf_jit_state *st, uint32_t op, uint32_t sreg, uint32_t dreg,
	uint32_t imm)
{
	const uint8_t ops = 0xF7;
	const uint8_t mods = 4;

	/* save rax & rdx */
	emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, REG_TMP0);
	emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RDX, REG_TMP1);

	/* rax = dreg */
	emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, dreg, RAX);

	if (BPF_SRC(op) == BPF_X)
		/* rdx = sreg */
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X,
			sreg == RAX ? REG_TMP0 : sreg, RDX);
	else
		/* rdx = imm */
		emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K, RDX, imm);

	emit_rex(st, op, RAX, RDX);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, mods, RDX);

	if (dreg != RDX)
		/* restore rdx */
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP1, RDX);

	if (dreg != RAX) {
		/* dreg = rax */
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, dreg);
		/* restore rax */
		emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP0, RAX);
	}
}
Example #5
static void
opt_peep(struct block *b)
{
	struct slist *s;
	struct slist *next, *last;
	int val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	for (/*empty*/; /*empty*/; s = next) {
		/*
		 * Skip over nops.
		 */
		s = this_op(s);
		if (s == 0)
			break;	/* nothing left in the block */

		/*
		 * Find the next real instruction after that one
		 * (skipping nops).
		 */
		next = this_op(s->next);
		if (next == 0)
			break;	/* no next instruction */
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx  #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				continue;

			/*
			 * Check that the instruction following the ldi
			 * is an addx, or it's an ldxms with an addx
			 * following it (with 0 or more nops between the
			 * ldxms and addx).
			 */
			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				continue;

			/*
			 * Check that a tax follows that (with 0 or more
			 * nops between them).
			 */
			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				continue;

			/*
			 * Check that an ild follows that (with 0 or more
			 * nops between them).
			 */
			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				continue;
			/*
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 * XXX We need to check that X is not
			 * subsequently used, because we want to change
			 * what'll be in it after this sequence.
			 *
			 * We know we can eliminate the accumulator
			 * modifications earlier in the sequence since
			 * it is defined by the last stmt of this sequence
			 * (i.e., the last statement of the sequence loads
			 * a value into the accumulator, so we can eliminate
			 * earlier operations on the accumulator).
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			done = 0;
		}
	}
	/*
	 * If the comparison at the end of a block is an equality
	 * comparison against a constant, and nobody uses the value
	 * we leave in the A register at the end of a block, and
	 * the operation preceding the comparison is an arithmetic
	 * operation, we can sometime optimize it away.
	 */
	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
	    	/*
	    	 * We can optimize away certain subtractions of the
	    	 * X register.
	    	 */
		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
			val = b->val[X_ATOM];
			if (vmap[val].is_const) {
				/*
				 * If we have a subtract to do a comparison,
				 * and the X register is a known constant,
				 * we can merge this value into the
				 * comparison:
				 *
				 * sub x  ->	nop
				 * jeq #y	jeq #(x+y)
				 */
				b->s.k += vmap[val].const_val;
				last->s.code = NOP;
				done = 0;
			} else if (b->s.k == 0) {
				/*
				 * If the X register isn't a constant,
				 * and the comparison in the test is
				 * against 0, we can compare with the
				 * X register, instead:
				 *
				 * sub x  ->	nop
				 * jeq #0	jeq x
				 */
				last->s.code = NOP;
				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
				done = 0;
			}
		}
		/*
		 * Likewise, a constant subtract can be simplified:
		 *
		 * sub #x ->	nop
		 * jeq #y ->	jeq #(x+y)
		 */
		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
			last->s.code = NOP;
			b->s.k += last->s.k;
			done = 0;
		}
		/*
		 * And, similarly, a constant AND can be simplified
		 * if we're testing against 0, i.e.:
		 *
		 * and #k	nop
		 * jeq #0  ->	jset #k
		 */
		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
		    b->s.k == 0) {
			b->s.k = last->s.k;
			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
			last->s.code = NOP;
			done = 0;
			opt_not(b);
		}
	}
	/*
	 * jset #0        ->   never
	 * jset #ffffffff ->   always
	 */
	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
		if (b->s.k == 0)
			JT(b) = JF(b);
		if (b->s.k == (int)0xffffffff)
			JF(b) = JT(b);
	}
	/*
	 * If we're comparing against the index register, and the index
	 * register is a known constant, we can just compare against that
	 * constant.
	 */
	val = b->val[X_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
		bpf_int32 v = vmap[val].const_val;
		b->s.code &= ~BPF_X;
		b->s.k = v;
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_int32 v = vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = (unsigned)v > (unsigned)b->s.k;
			break;

		case BPF_JGE:
			v = (unsigned)v >= (unsigned)b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b))
			done = 0;
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}
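
One of the peephole rewrites above folds a known-constant X into the following comparison: sub x / jeq #y becomes nop / jeq #(x+y). A standalone before/after sketch using the classic sock_filter macros from <linux/filter.h>; the concrete constants (X proven to be 4, a comparison against 10) and the jump offsets are made up for the illustration:

#include <stdio.h>
#include <linux/filter.h>	/* struct sock_filter, BPF_STMT, BPF_JUMP */

/* Before: A -= X; if (A == 10) jump +1, else fall through.
 * Assume the optimizer has proven that X == 4 at this point. */
static struct sock_filter before[] = {
	BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 10, 1, 0),
};

/* After opt_peep (conceptually): the subtraction is turned into a nop,
 * shown here as simply dropped, and its constant is merged into the
 * comparison, since A - 4 == 10 is the same as A == 14. */
static struct sock_filter after[] = {
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 14, 1, 0),
};

int main(void)
{
	printf("before: %zu insns, after: %zu insn\n",
	       sizeof(before) / sizeof(before[0]),
	       sizeof(after) / sizeof(after[0]));
	return 0;
}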
Example #6
static void
opt_blk(struct block *b, int do_stmts)
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_int32 aval, xval;

#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 */
	p = b->in_edges;
	if (p == 0) {
		/*
		 * We have no predecessors, so everything is undefined
		 * upon entry to this block.
		 */
		memset((char *)b->val, 0, sizeof(b->val));
	} else {
		/*
		 * Inherit values from our predecessors.
		 *
		 * First, get the values from the predecessor along the
		 * first edge leading to this node.
		 */
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		/*
		 * Now look at all the other nodes leading to this node.
		 * If, for the predecessor along that edge, a register
		 * has a different value from the one we have (i.e.,
		 * control paths are merging, and the merging paths
		 * assign different values to that register), give the
		 * register the undefined value of 0.
		 */
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	xval = b->val[X_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(&s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	if (do_stmts &&
	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval &&
	      xval != 0 && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			done = 0;
		}
	} else {
		opt_peep(b);
		opt_deadstores(b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}
Example #7
static int bpf_anal(RAnal *anal, RAnalOp *op, ut64 addr,
	const ut8 *data, int len) {
	RBpfSockFilter *f = (RBpfSockFilter *)data;
	memset (op, '\0', sizeof (RAnalOp));
	op->jump = UT64_MAX;
	op->fail = UT64_MAX;
	op->ptr = op->val = UT64_MAX;
	op->type = R_ANAL_OP_TYPE_UNK;
	op->size = 8;
	op->addr = addr;

	r_strbuf_init (&op->esil);

	switch (f->code) {
	case BPF_RET | BPF_A:
		op->type = R_ANAL_OP_TYPE_RET;
		esilprintf (op, "A,R0,=,0,$");
		break;
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_X:
		op->type = R_ANAL_OP_TYPE_RET;
		if (BPF_SRC (f->code) == BPF_K) {
			esilprintf (op, "%" PFMT64d ",R0,=,0,$", f->k);
		} else if (BPF_SRC (f->code) == BPF_X) {
			esilprintf (op, "X,R0,=,0,$");
		}
		break;
	case BPF_MISC_TAX:
		op->type = R_ANAL_OP_TYPE_MOV;
		SET_REG_SRC_DST (op, "A", "X");
		esilprintf (op, "A,X,=");
		break;
	case BPF_MISC_TXA:
		op->type = R_ANAL_OP_TYPE_MOV;
		SET_REG_SRC_DST (op, "X", "A");
		esilprintf (op, "X,A,=");
		break;
	case BPF_ST:
		if (INSIDE_M (f->k)) {
			op->type = R_ANAL_OP_TYPE_MOV;
			SET_REG_SRC_DST (op, "A", M[f->k]);
			esilprintf (op, "A,M[%" PFMT64d "],=", f->k);
		} else {
			op->type = R_ANAL_OP_TYPE_ILL;
		}
		break;
	case BPF_STX:
		if (INSIDE_M (f->k)) {
			op->type = R_ANAL_OP_TYPE_MOV;
			SET_REG_SRC_DST (op, "X", M[f->k]);
			esilprintf (op, "X,M[%" PFMT64d "],=", f->k);
		} else {
			op->type = R_ANAL_OP_TYPE_ILL;
		}
		break;
	case BPF_LD_W | BPF_LEN:
		op->type = R_ANAL_OP_TYPE_MOV;
		SET_REG_SRC_DST (op, "len", "A");
		esilprintf (op, "len,A,=", f->k);
		break;
	case BPF_LDX | BPF_LEN:
		op->type = R_ANAL_OP_TYPE_MOV;
		SET_REG_SRC_DST (op, "len", "X");
		esilprintf (op, "len,X,=", f->k);
		break;
	case BPF_LD_W | BPF_ABS:
		EMIT_LOAD (op, anal->gp + f->k, 4);
		SET_A_DST (op);
		esilprintf (op,
			"len,%" PFMT64d ",>,?{,0,R0,=,0,$,BREAK,},%" PFMT64d ",[4],A,=",
			f->k + 4, op->ptr);
		break;
	case BPF_LD_H | BPF_ABS:
		EMIT_LOAD (op, anal->gp + f->k, 2);
		SET_A_DST (op);
		esilprintf (op,
			"len,%" PFMT64d ",>,?{,0,R0,=,0,$,BREAK,},"
			"%" PFMT64d ",[2],A,=",
			f->k + 2, op->ptr);
		break;
	case BPF_LD_B | BPF_ABS:
		EMIT_LOAD (op, anal->gp + f->k, 1);
		SET_A_DST (op);
		esilprintf (op,
			"len,%" PFMT64d ",>,?{,0,R0,=,0,$,BREAK,},"
			"%" PFMT64d ",[1],A,=",
			f->k + 1, op->ptr);
		break;
	case BPF_LD_W | BPF_IND:
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->ptrsize = 4;
		SET_A_DST (op);
		esilprintf (op,
			"len,%" PFMT64d ",X,+,0xffffffff,&,>,?{,0,R0,=,0,$,BREAK,},"
			"%" PFMT64d ",X,+,0xffffffff,&,[4],A,=",
			(st32)f->k + 4, anal->gp + (st32)f->k);
		break;
	case BPF_LD_H | BPF_IND:
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->ptrsize = 2;
		SET_A_DST (op);
		esilprintf (op,
			"len,%" PFMT64d ",X,+,0xffffffff,&,>,?{,0,R0,=,0,$,BREAK,},"
			"%" PFMT64d ",X,+,0xffffffff,&,[2],A,=",
			(st32)f->k + 2, anal->gp + (st32)f->k);
		break;
	case BPF_LD_B | BPF_IND:
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->ptrsize = 1;
		SET_A_DST (op);
		esilprintf (op,
			"len,%" PFMT64d ",X,+,0xffffffff,&,>,?{,0,R0,=,0,$,BREAK,},"
			"%" PFMT64d ",X,+,0xffffffff,&,[1],A,=",
			(st32)f->k + 1, anal->gp + (st32)f->k);
		break;
	case BPF_LD | BPF_IMM:
		op->type = R_ANAL_OP_TYPE_MOV;
		op->val = f->k;
		SET_REG_DST_IMM (op, "A", f->k);
		esilprintf (op, "0x%08" PFMT64x ",A,=", f->k);
		break;
	case BPF_LDX | BPF_IMM:
		op->type = R_ANAL_OP_TYPE_MOV;
		op->val = f->k;
		SET_REG_DST_IMM (op, "X", f->k);
		esilprintf (op, "0x%08" PFMT64x ",X,=", f->k);
		break;
	case BPF_LDX_B | BPF_MSH:
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->ptrsize = 1;
		op->ptr = anal->gp + f->k;
		SET_A_DST (op);
		esilprintf (op, "%" PFMT64d ",[1],0xf,&,4,*,X,=", op->ptr);
		break;
	case BPF_LD | BPF_MEM:
		op->type = R_ANAL_OP_TYPE_MOV;
		if (INSIDE_M (f->k)) {
			SET_REG_SRC_DST (op, M[f->k], "A");
			esilprintf (op, "M[%" PFMT64d "],A,=", f->k);
		} else {
			op->type = R_ANAL_OP_TYPE_ILL;
		}
		break;
	case BPF_LDX | BPF_MEM:
		op->type = R_ANAL_OP_TYPE_MOV;
		if (INSIDE_M (f->k)) {
			SET_REG_SRC_DST (op, M[f->k], "X");
			esilprintf (op, "M[%" PFMT64d "],X,=", f->k);
		} else {
			op->type = R_ANAL_OP_TYPE_ILL;
		}
		break;
	case BPF_JMP_JA:
		op->type = R_ANAL_OP_TYPE_JMP;
		op->jump = addr + 8 + f->k * 8;
		esilprintf (op, "%" PFMT64d ",pc,=", op->jump);

		break;
	case BPF_JMP_JGT | BPF_X:
	case BPF_JMP_JGT | BPF_K:
		EMIT_CJMP (op, addr, f);
		op->cond = R_ANAL_COND_GT;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			esilprintf (op,
				"%" PFMT64d ",A,>,?{,%" PFMT64d ",pc,=,BREAK,},%" PFMT64d ",pc,=",
				op->val, op->jump, op->fail);
		} else if (BPF_SRC (f->code) == BPF_X) {
			esilprintf (op,
				"X,A,>,?{,%" PFMT64d ",pc,=,BREAK,},%" PFMT64d ",pc,=",
				op->jump, op->fail);
		} else {
			op->type = R_ANAL_OP_TYPE_ILL;
		}
		break;
	case BPF_JMP_JGE | BPF_X:
	case BPF_JMP_JGE | BPF_K:
		EMIT_CJMP (op, addr, f);
		op->cond = R_ANAL_COND_GE;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			esilprintf (op,
				"%" PFMT64d ",A,>=,?{,%" PFMT64d ",pc,=,BREAK,},%" PFMT64d ",pc,=",
				op->val, op->jump, op->fail);
		} else {
			esilprintf (op,
				"X,A,>=,?{,%" PFMT64d ",pc,=,BREAK,},%" PFMT64d ",pc,=",
				op->jump, op->fail);
		}
		break;
	case BPF_JMP_JEQ | BPF_X:
	case BPF_JMP_JEQ | BPF_K:
		EMIT_CJMP (op, addr, f);
		op->cond = R_ANAL_COND_EQ;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			esilprintf (op,
				"%" PFMT64d ",A,==,$z,?{,%" PFMT64d ",pc,=,BREAK,},%" PFMT64d ",pc,=",
				op->val, op->jump, op->fail);
		} else {
			esilprintf (op,
				"X,A,==,$z,?{,%" PFMT64d ",pc,=,BREAK,},%" PFMT64d ",pc,=",
				op->jump, op->fail);
		}
		break;
	case BPF_JMP_JSET | BPF_X:
	case BPF_JMP_JSET | BPF_K:
		EMIT_CJMP (op, addr, f);
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			esilprintf (op,
				"%" PFMT64d ",A,&,?{,%" PFMT64d ",pc,=,BREAK,},%" PFMT64d ",pc,=",
				op->val, op->jump, op->fail);
		} else {
			esilprintf (op,
				"X,A,&,!,?{,%" PFMT64d ",pc,=,BREAK,},%" PFMT64d ",pc,=",
				op->val, op->jump, op->fail);
		}
		break;
	case BPF_ALU_NEG:
		op->type = R_ANAL_OP_TYPE_NOT;
		esilprintf (op, "A,0,-,A,=");
		SET_REG_SRC_DST (op, "A", "A");
		break;
	case BPF_ALU_LSH | BPF_X:
	case BPF_ALU_LSH | BPF_K:
		op->type = R_ANAL_OP_TYPE_SHL;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", f->k);
			esilprintf (op, "%" PFMT64d ",A,<<=", f->k);
		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "X,A,<<=");
		}
		break;
	case BPF_ALU_RSH | BPF_X:
	case BPF_ALU_RSH | BPF_K:
		op->type = R_ANAL_OP_TYPE_SHR;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", f->k);
			esilprintf (op, "%" PFMT64d ",A,>>=", f->k);
		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "X,A,>>=");
		}
		break;
	case BPF_ALU_ADD | BPF_X:
	case BPF_ALU_ADD | BPF_K:
		op->type = R_ANAL_OP_TYPE_ADD;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", op->val);
			esilprintf (op, "%" PFMT64d ",A,+=", op->val);
		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "X,A,+=");
		}
		break;
	case BPF_ALU_SUB | BPF_X:
	case BPF_ALU_SUB | BPF_K:
		op->type = R_ANAL_OP_TYPE_SUB;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", op->val);
			esilprintf (op, "%" PFMT64d ",A,-=", op->val);

		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "X,A,-=");
		}
		break;
	case BPF_ALU_MUL | BPF_X:
	case BPF_ALU_MUL | BPF_K:
		op->type = R_ANAL_OP_TYPE_MUL;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", f->k);
			esilprintf (op, "%" PFMT64d ",A,*=", f->k);
		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "X,A,*=");
		}
		break;
	case BPF_ALU_DIV | BPF_X:
	case BPF_ALU_DIV | BPF_K:
		op->type = R_ANAL_OP_TYPE_DIV;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", f->k);
			if (f->k == 0) {
				esilprintf (op, "0,R0,=,0,$");
			} else {
				esilprintf (op, "%" PFMT64d ",A,/=", f->k);
			}
		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "0,X,==,$z,?{,0,R0,=,0,$,BREAK,},X,A,/=");
		}
		break;
	case BPF_ALU_MOD | BPF_X:
	case BPF_ALU_MOD | BPF_K:
		op->type = R_ANAL_OP_TYPE_MOD;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", f->k);
			if (f->k == 0) {
				esilprintf (op, "0,R0,=,0,$");
			} else {
				esilprintf (op, "%" PFMT64d ",A,%%=", f->k);
			}
		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "0,X,==,$z,?{,0,R0,=,0,$,BREAK,},X,A,%%=");
		}
		break;
	case BPF_ALU_AND | BPF_X:
	case BPF_ALU_AND | BPF_K:
		op->type = R_ANAL_OP_TYPE_AND;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", f->k);
			esilprintf (op, "%" PFMT64d ",A,&=", f->k);
		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "X,A,&=");
		}
		break;
	case BPF_ALU_OR | BPF_X:
	case BPF_ALU_OR | BPF_K:
		op->type = R_ANAL_OP_TYPE_OR;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", f->k);
			esilprintf (op, "%" PFMT64d ",A,|=", f->k);
		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "X,A,|,A,=");
		}
		break;
	case BPF_ALU_XOR | BPF_X:
	case BPF_ALU_XOR | BPF_K:
		op->type = R_ANAL_OP_TYPE_XOR;
		if (BPF_SRC (f->code) == BPF_K) {
			op->val = f->k;
			SET_REG_DST_IMM (op, "A", f->k);
			esilprintf (op, "%" PFMT64d ",A,^=", f->k);
		} else {
			SET_REG_SRC_DST (op, "X", "A");
			esilprintf (op, "X,A,^=");
		}
		break;
	default:
		op->type = R_ANAL_OP_TYPE_ILL;
		break;
	}

	return op->size;
}
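
The analyzer casts the raw 8 instruction bytes to an RBpfSockFilter, which follows the classic BPF instruction layout: a 16-bit code, two 8-bit jump offsets and a 32-bit k field. A standalone sketch of that decoding step using the equivalent struct sock_filter from <linux/filter.h>; the sample bytes are a hypothetical little-endian encoding of "ret #0xffff":

#include <stdio.h>
#include <string.h>
#include <linux/filter.h>	/* struct sock_filter */

int main(void)
{
	/* "ret #0xffff": code = BPF_RET | BPF_K = 0x06, k = 0xffff. */
	const unsigned char raw[8] = { 0x06, 0x00, 0x00, 0x00,
				       0xff, 0xff, 0x00, 0x00 };
	struct sock_filter f;

	memcpy(&f, raw, sizeof(f));
	/* Prints: code=0x6 jt=0 jf=0 k=0xffff */
	printf("code=%#x jt=%u jf=%u k=%#x\n", f.code, f.jt, f.jf, f.k);
	return 0;
}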
Example #8
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog);

	if (seen_ld_abs)
		emit_load_skb_data_hlen(&prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		bool reload_skb_data;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}

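		/* fall through: a non-negative imm32 zero-extends, so the
		 * 32-bit mov case below handles it
		 */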
		case BPF_ALU | BPF_MOV | BPF_K:
			/* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
			 * to save 3 bytes.
			 */
			if (imm32 == 0) {
				if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
				break;
			}

			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}

			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
			 * to save 7 bytes.
			 */
			if (insn[0].imm == 0 && insn[1].imm == 0) {
				b1 = add_2mod(0x48, dst_reg, dst_reg);
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));

				insn++;
				i++;
				break;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				reload_skb_data = bpf_helper_changes_skb_data(func);
				if (reload_skb_data) {
					EMIT1(0x57); /* push %rdi */
					jmp_offset += 22; /* pop, mov, sub, mov */
				} else {
					EMIT2(0x41, 0x52); /* push %r10 */
					EMIT2(0x41, 0x51); /* push %r9 */
					/* need to adjust jmp offset, since
					 * pop %r9, pop %r10 take 4 bytes after call insn
					 */
					jmp_offset += 4;
				}
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				if (reload_skb_data) {
					EMIT1(0x5F); /* pop %rdi */
					emit_load_skb_data_hlen(&prog);
				} else {
					EMIT2(0x41, 0x59); /* pop %r9 */
					EMIT2(0x41, 0x5A); /* pop %r10 */
				}
			}
			break;

		case BPF_JMP | BPF_CALL | BPF_X:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design x64 JIT should support all BPF instructions
			 * This error will be seen if new instruction was added
			 * to interpreter, but not to JIT
			 * or if there is junk in bpf_prog
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}
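
For the register-to-register ALU cases at the top of the switch, the emitted code is just an optional REX prefix, the one-byte opcode chosen by the inner switch (0x01 add, 0x29 sub, 0x21 and, 0x09 or, 0x31 xor) and a ModRM byte. A hand-assembled sketch (outside the kernel) of what BPF_ALU64 | BPF_ADD | BPF_X comes out to, assuming the JIT's usual x86-64 register mapping of R1 to %rdi and R2 to %rsi:

#include <stdio.h>

int main(void)
{
	/* add rdi, rsi: REX.W (0x48), opcode 0x01, ModRM 0xF7.
	 * ModRM = 0xC0 (register-direct) | (6 << 3) (reg = rsi) | 7 (rm = rdi). */
	const unsigned char insn[] = { 0x48, 0x01, 0xC0 | (6 << 3) | 7 };

	for (size_t n = 0; n < sizeof(insn); n++)
		printf("%02x ", insn[n]);	/* 48 01 f7 */
	printf("\n");
	return 0;
}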
Example #9
/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs, bool extra_pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			PPC_SRAW(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			PPC_SRAWI(dst_reg, dst_reg, imm);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * The imm gets a sign-extended load, while
				 * cmpldi zero-extends its 16-bit immediate,
				 * so only positive values can be used as imm
				 * in cmpldi directly
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
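
The BPF_LD | BPF_IMM | BPF_DW case above relies on the eBPF convention that a 64-bit immediate load occupies two consecutive 'struct bpf_insn' slots: the low 32 bits live in the first slot's imm field and the high 32 bits in the second. A minimal stand-alone sketch of that reassembly is shown below; the struct only mirrors the field layout of the kernel's bpf_insn so the example stays self-contained, it is not the kernel definition itself.

#include <stdint.h>

/* Illustrative stand-in mirroring the layout of the kernel's struct bpf_insn. */
struct ebpf_insn {
	uint8_t  code;		/* opcode */
	uint8_t  dst_reg:4;	/* destination register */
	uint8_t  src_reg:4;	/* source register */
	int16_t  off;		/* signed offset */
	int32_t  imm;		/* signed immediate */
};

/*
 * Reassemble the 64-bit immediate of BPF_LD | BPF_IMM | BPF_DW:
 * insn[0].imm holds the low 32 bits and insn[1].imm the high 32 bits,
 * exactly as the JIT above computes imm64 before calling PPC_LI64().
 */
static uint64_t ld_imm64_value(const struct ebpf_insn *insn)
{
	return (uint64_t)(uint32_t)insn[0].imm |
	       ((uint64_t)(uint32_t)insn[1].imm << 32);
}
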
Example #10
0
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds the 'normal' opcode; the b2 short form is
			 * only valid when dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * div uses rdx:rax (edx:eax for 32-bit) as the
			 * dividend, so clear rdx first:
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx (remainder) */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax (quotient) */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero-extend the
				 * lower 16 bits into 64 bits
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* Emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* Need an extra REX byte for the x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* Emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* Emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* Emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* Emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/*
			 * If insn->off == 0 we could save one byte here, but
			 * the special case of x86 R13 (which always needs an
			 * offset) makes it not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			/* imm32 is the call target's offset from __bpf_call_base */
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported BPF func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/*
				 * An off == -1 jmp always jumps backwards two
				 * bytes, i.e. onto itself. Explicitly handling
				 * this case avoids wasting too many passes
				 * when long runs of dead code have been
				 * replaced with such jumps.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* Optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp+0] */
			EMIT4(0x48, 0x8B, 0x5D, 0);
			/* mov r13, qword ptr [rbp+8] */
			EMIT4(0x4C, 0x8B, 0x6D, 8);
			/* mov r14, qword ptr [rbp+16] */
			EMIT4(0x4C, 0x8B, 0x75, 16);
			/* mov r15, qword ptr [rbp+24] */
			EMIT4(0x4C, 0x8B, 0x7D, 24);

			/* add rbp, AUX_STACK_SPACE */
			EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/*
			 * By design the x86-64 JIT should support all BPF
			 * instructions. This error will be seen if a new
			 * instruction was added to the interpreter but not to
			 * the JIT, or if there is junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}
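
Both the conditional and unconditional jump paths above choose between the short 2-byte rel8 encoding and the rel32 form via is_imm8() and is_simm32(), which are defined earlier in the original source and not shown in this excerpt. They are plain range checks, most likely along these lines (a sketch under that assumption, not necessarily the kernel's exact definitions):

#include <stdbool.h>
#include <stdint.h>

/* Does the value fit in a signed 8-bit displacement (rel8 / disp8)? */
static bool is_imm8(int64_t value)
{
	return value <= 127 && value >= -128;
}

/* Does the value fit in a signed 32-bit displacement (rel32 / disp32)? */
static bool is_simm32(int64_t value)
{
	return value == (int64_t)(int32_t)value;
}

When the rel8 form fits, the branch costs two bytes; otherwise the 0x0F-prefixed near form with a 4-byte offset is emitted. That size difference is why the outer JIT driver (not shown here) runs do_jit() repeatedly, feeding the resulting addrs[] back in until the instruction offsets stop changing.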