Example #1
/*
 * emit REX byte
 */
static void
emit_rex(struct bpf_jit_state *st, uint32_t op, uint32_t reg, uint32_t rm)
{
	uint8_t rex;

	/* mark operand registers as used */
	USED(st->reguse, reg);
	USED(st->reguse, rm);

	rex = 0;
	if (BPF_CLASS(op) == EBPF_ALU64 ||
			op == (BPF_ST | BPF_MEM | EBPF_DW) ||
			op == (BPF_STX | BPF_MEM | EBPF_DW) ||
			op == (BPF_STX | EBPF_XADD | EBPF_DW) ||
			op == (BPF_LD | BPF_IMM | EBPF_DW) ||
			(BPF_CLASS(op) == BPF_LDX &&
			BPF_MODE(op) == BPF_MEM &&
			BPF_SIZE(op) != BPF_W))
		rex |= REX_W;

	if (IS_EXT_REG(reg))
		rex |= REX_R;

	if (IS_EXT_REG(rm))
		rex |= REX_B;

	/* store using SIL, DIL */
	if (op == (BPF_STX | BPF_MEM | BPF_B) && (reg == RDI || reg == RSI))
		rex |= REX_PREFIX;

	if (rex != 0) {
		rex |= REX_PREFIX;
		emit_bytes(st, &rex, sizeof(rex));
	}
}
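For reference, the bits tested above follow the standard x86-64 REX prefix encoding (0100WRXB). The helper below is a minimal sketch of that layout; the function name is illustrative and the actual macro values used by emit_rex() live in the JIT's own headers.

#include <stdint.h>

/* Sketch: compose an x86-64 REX prefix byte (0100WRXB), the layout that
 * emit_rex() above fills in bit by bit. Values per the architectural encoding. */
static uint8_t make_rex(int w, int r, int x, int b)
{
	return 0x40 | (w << 3) | (r << 2) | (x << 1) | b;
}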
Example #2
void flattenGraphTraversal(struct flatten_item * graph, struct bpf_program * filter, int indice, int level)
{
    int j;
    struct bpf_insn *insn = &filter->bf_insns[graph[indice].bf_instruct_link];
    
    if(level > 50)
        return;
     
    for(j = 0;j< level;j++)
    {
        printf(" ");
    }

    printf("(%d : %d) fnode (code:%u - k:%u)\n",level, indice, insn->code, insn->k);
    /*printf("(%d) %s\n",level, bpf_image(insn,indice));*/
    if(BPF_CLASS(insn->code) == BPF_JMP)
    {
        if(BPF_OP(insn->code) == BPF_JA)
        {
            flattenGraphTraversal(graph, filter, graph[indice].true_instruct, level+1);
        }
        else
        {
            flattenGraphTraversal(graph, filter, graph[indice].true_instruct, level+1);
            flattenGraphTraversal(graph, filter, graph[indice].false_instruct, level+1);
        }
    }
    else if(BPF_CLASS(insn->code) != BPF_RET)
    {
        flattenGraphTraversal(graph, filter, graph[indice].true_instruct, level+1);
    }
}
Example #3
/* Because we really don't have an IR, this stuff is a little messy. */
static int
F(int code, int v0, int v1)
{
	u_int hash;
	int val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	val = ++curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		vmap[val].const_val = v0;
		vmap[val].is_const = 1;
	}
	p = next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = hashtbl[hash];
	hashtbl[hash] = p;

	return val;
}
Example #4
/*
 * this function compares the existing graph with the new flattened graph
 *
 * @param graph : the flattened graph
 * @param filter : the corresponding bpf graph
 * @param indice : the index of the node to compare
 * @param node : the list of nodes that may contain a similar node
 * @return : the number of equivalent nodes in the tree
 *
 */
unsigned int compareGraphExploreChild(struct flatten_item * graph, struct bpf_program * filter, int indice, struct filter_node * node,unsigned int * collide)
{
    int i = graph[indice].bf_instruct_link;
    unsigned int local_collide = 0, child_collide_true = 0, child_collide_false = 0;
    unsigned int ret = 0;
    struct filter_node * current_node = node;
    
    /* look for a matching root node */
    for( ; current_node != NULL ; current_node = current_node->next)
    {
        if(current_node->item->code == filter->bf_insns[i].code && current_node->item->k == filter->bf_insns[i].k)
        {
            break;
        }
    }
    
    /* no matching node, we will have to insert here */
    if(current_node == NULL)
    {
        if(indice > 0 && node!= NULL)
        {
            /* we have a node that could cause a breakpoint */
            local_collide = 1;
        }
        
        *collide = local_collide;
        return 0;
    }
    else
    {
        if(indice > 0 && node != NULL && node->next!=NULL)
        {
            local_collide = 1;
        }
    }
    
    if(BPF_CLASS(filter->bf_insns[i].code) == BPF_JMP && BPF_OP(filter->bf_insns[i].code) != BPF_JA) /* conditional node */
    {
        ret = 1 + compareGraphExploreChild(graph, filter, graph[indice].true_instruct, current_node->item->next_child_t,&child_collide_true)
                 + compareGraphExploreChild(graph, filter, graph[indice].false_instruct, current_node->item->next_child_f, &child_collide_false);
    }
    else if(BPF_CLASS(filter->bf_insns[i].code) != BPF_RET) /* normal node */
    {
        ret = 1 + compareGraphExploreChild(graph, filter, graph[indice].true_instruct, current_node->item->next_child_t,&child_collide_true);
    }
    /*else
    {
        we do not care about terminal nodes here, they will be added no matter what; this is necessary for deletion
    }*/

    *collide = local_collide + ((child_collide_true > child_collide_false) ? child_collide_true : child_collide_false);

    return ret;
}
Example #5
/*
 * XXX Copied from sys/net/bpf_filter.c and modified.
 *
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code.  The code must terminate with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
static int
bpf_validate(const struct bpf_insn *f, int len)
{
	register int i;
	register const struct bpf_insn *p;

	/* Do not accept negative length filter. */
	if (len < 0)
		return (0);

	/* An empty filter means accept all. */
	if (len == 0)
		return (1);

	for (i = 0; i < len; ++i) {
		p = &f[i];
		/*
		 * Check that the code is valid.
		 */
		if (!BPF_VALIDATE_CODE(p->code))
			return (0);
		/*
		 * Check that the jumps are forward, and within
		 * the code block.
		 */
		if (BPF_CLASS(p->code) == BPF_JMP) {
			register u_int offset;

			if (p->code == (BPF_JMP|BPF_JA))
				offset = p->k;
			else
				offset = p->jt > p->jf ? p->jt : p->jf;
			if (offset >= (u_int)(len - i) - 1)
				return (0);
			continue;
		}
		/*
		 * Check that memory operations use valid addresses.
		 */
		if (p->code == BPF_ST || p->code == BPF_STX ||
		    p->code == (BPF_LD|BPF_MEM) ||
		    p->code == (BPF_LDX|BPF_MEM)) {
			if (p->k >= BPF_MEMWORDS)
				return (0);
			continue;
		}
		/*
		 * Check for constant division by 0.
		 */
		if (p->code == (BPF_ALU|BPF_DIV|BPF_K) && p->k == 0)
			return (0);
	}
	return (BPF_CLASS(f[len - 1].code) == BPF_RET);
}
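A quick way to exercise a validator like this is to feed it a tiny hand-built program. The snippet below is a minimal sketch: it relies on the BPF_STMT/BPF_JUMP macros and struct bpf_insn from the BPF header the file above already uses (<net/bpf.h> or <pcap/bpf.h>), and assumes bpf_validate() above is visible in the same translation unit; check_test_prog is an illustrative name.

#include <stdio.h>

/* "accept IPv4, reject everything else" on an Ethernet frame */
static struct bpf_insn test_prog[] = {
	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),            /* load ethertype */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1), /* IPv4? */
	BPF_STMT(BPF_RET | BPF_K, 0xffff),                 /* accept */
	BPF_STMT(BPF_RET | BPF_K, 0),                      /* reject */
};

static void check_test_prog(void)
{
	const int n = sizeof(test_prog) / sizeof(test_prog[0]);

	printf("filter is %s\n", bpf_validate(test_prog, n) ? "valid" : "invalid");
}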
Example #6
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal instructions
 * and no backward jumps. It must end with a RET instruction
 *
 * Returns 0 if the rule set is legal or a negative errno code if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
    struct sock_filter *ftest;
    int pc;

    if (((unsigned int)flen >= (~0U / sizeof(struct sock_filter))) || flen == 0)
        return -EINVAL;

    /* check the filter code now */
    for (pc = 0; pc < flen; pc++) {
        /* all jumps are forward as they are not signed */
        ftest = &filter[pc];
        if (BPF_CLASS(ftest->code) == BPF_JMP) {
            /* but they mustn't jump off the end */
            if (BPF_OP(ftest->code) == BPF_JA) {
                /*
                 * Note, the large ftest->k might cause loops.
                 * Compare this with conditional jumps below,
                 * where offsets are limited. --ANK (981016)
                 */
                if (ftest->k >= (unsigned)(flen-pc-1))
                    return -EINVAL;
            } else {
                /* for conditionals both must be safe */
                if (pc + ftest->jt +1 >= flen ||
                        pc + ftest->jf +1 >= flen)
                    return -EINVAL;
            }
        }

        /* check that memory operations use valid addresses. */
        if (ftest->k >= BPF_MEMWORDS) {
            /* but it might not be a memory operation... */
            switch (ftest->code) {
            case BPF_ST:
            case BPF_STX:
            case BPF_LD|BPF_MEM:
            case BPF_LDX|BPF_MEM:
                return -EINVAL;
            }
        }
    }

    /*
     * The program must end with a return. We don't care where they
     * jumped within the script (it's always forwards) but in the end
     * they _will_ hit this.
     */
    return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
}
Example #7
static void
decode_bpf_code(uint16_t code)
{
	uint16_t i = code & ~BPF_CLASS(code);

	printxval(bpf_class, BPF_CLASS(code), "BPF_???");
	switch (BPF_CLASS(code)) {
		case BPF_LD:
		case BPF_LDX:
			tprints(" | ");
			printxval(bpf_size, BPF_SIZE(code), "BPF_???");
			tprints(" | ");
			printxval(bpf_mode, BPF_MODE(code), "BPF_???");
			break;
		case BPF_ST:
		case BPF_STX:
			if (i)
				tprintf(" | %#x /* %s */", i, "BPF_???");
			break;
		case BPF_ALU:
			tprints(" | ");
			printxval(bpf_src, BPF_SRC(code), "BPF_???");
			tprints(" | ");
			printxval(bpf_op_alu, BPF_OP(code), "BPF_???");
			break;
		case BPF_JMP:
			tprints(" | ");
			printxval(bpf_src, BPF_SRC(code), "BPF_???");
			tprints(" | ");
			printxval(bpf_op_jmp, BPF_OP(code), "BPF_???");
			break;
		case BPF_RET:
			tprints(" | ");
			printxval(bpf_rval, BPF_RVAL(code), "BPF_???");
			i &= ~BPF_RVAL(code);
			if (i)
				tprintf(" | %#x /* %s */", i, "BPF_???");
			break;
		case BPF_MISC:
			tprints(" | ");
			printxval(bpf_miscop, BPF_MISCOP(code), "BPF_???");
			i &= ~BPF_MISCOP(code);
			if (i)
				tprintf(" | %#x /* %s */", i, "BPF_???");
			break;
	}

}
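The decoder above leans entirely on the classic BPF bit-field macros. As a reference, here is a small sketch that decomposes a single code word the same way; the field masks follow the traditional BPF header, and the helper name is made up for illustration.

#include <linux/filter.h>
#include <stdint.h>
#include <stdio.h>

/* Decompose one classic BPF code word into the fields printed above:
 * class = code & 0x07, size = code & 0x18, mode = code & 0xe0,
 * op = code & 0xf0, src = code & 0x08 (per the traditional BPF header). */
static void show_bpf_fields(uint16_t code)
{
	printf("code=%#x class=%#x size=%#x mode=%#x op=%#x src=%#x\n",
	       code, BPF_CLASS(code), BPF_SIZE(code), BPF_MODE(code),
	       BPF_OP(code), BPF_SRC(code));
}

/* e.g. show_bpf_fields(BPF_LD | BPF_H | BPF_ABS): class=0, size=0x8, mode=0x20 */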
Example #8
static void
fixup_jumps(npf_bpf_t *ctx, u_int start, u_int end, bool swap)
{
	struct bpf_program *bp = &ctx->prog;

	for (u_int i = start; i < end; i++) {
		struct bpf_insn *insn = &bp->bf_insns[i];
		const u_int fail_off = end - i;

		if (fail_off >= JUMP_MAGIC) {
			errx(EXIT_FAILURE, "BPF generation error: "
			    "the number of instructions is over the limit");
		}
		if (BPF_CLASS(insn->code) != BPF_JMP) {
			continue;
		}
		if (swap) {
			uint8_t jt = insn->jt;
			insn->jt = insn->jf;
			insn->jf = jt;
		}
		if (insn->jt == JUMP_MAGIC)
			insn->jt = fail_off;
		if (insn->jf == JUMP_MAGIC)
			insn->jf = fail_off;
	}
}
Example #9
/*
 * this function flattens a bpf graph
 *
 * @param graph : the flattened graph to fill
 * @param filter : the bpf graph to convert
 * @param non_terminal_count : the number of non-terminal nodes in the flattened graph
 * @param terminal_count : the number of terminal nodes in the graph
 * @param i : the index of the instruction in the bpf graph
 * @param id : the next available identifier for adding a node
 * @return : the id of the node added by this call of the function
 */
int flatteGraph(struct flatten_item * graph, struct bpf_program * filter,int non_terminal_count, int terminal_count, int i, int * id)
{
    int current_id, indice;
    
    if(BPF_CLASS(filter->bf_insns[i].code) == BPF_JMP) /*JUMP INSTRUCTION*/
    {
        current_id = (*id)++;
        graph[current_id].bf_instruct_link = i;
        
        if(BPF_OP(filter->bf_insns[i].code) == BPF_JA)
        {
            graph[current_id].true_instruct = flatteGraph(graph, filter, non_terminal_count, terminal_count, filter->bf_insns[i].k+i+1,id);
        }
        else
        {
            graph[current_id].true_instruct = flatteGraph(graph, filter, non_terminal_count, terminal_count, filter->bf_insns[i].jt+i+1, id);
            graph[current_id].false_instruct = flatteGraph(graph, filter, non_terminal_count, terminal_count, filter->bf_insns[i].jf+i+1, id);
        }
    }
    else if(BPF_CLASS(filter->bf_insns[i].code) != BPF_RET) /*OTHER INSTRUCTION*/
    {
        current_id = (*id)++;
        graph[current_id].bf_instruct_link = i;
        graph[current_id].true_instruct = flatteGraph(graph, filter, non_terminal_count, terminal_count, i+1,id);
    }
    else /*TERMINAL INSTRUCTION*/
    {                
        for(indice = 0 ; (indice<terminal_count) && ( graph[non_terminal_count + indice].bf_instruct_link != -1) ; indice++)
        {
            if(graph[non_terminal_count + indice].bf_instruct_link == i)
            {
                graph[non_terminal_count + indice].true_instruct += 1;
                return non_terminal_count + indice;
            }
        }
        
        /* terminal node not yet encountered, add it */
        current_id = non_terminal_count + indice;
        graph[current_id].bf_instruct_link = i;
        graph[current_id].true_instruct = 1;
        graph[current_id].false_instruct = indice;
    }
    
    return current_id;
}
Example #10
/*
 * Compute the sets of registers used, defined, and killed by 'b'.
 *
 * "Used" means that a statement in 'b' uses the register before any
 * statement in 'b' defines it, i.e. it uses the value left in
 * that register by a predecessor block of this block.
 * "Defined" means that a statement in 'b' defines it.
 * "Killed" means that a statement in 'b' defines it before any
 * statement in 'b' uses it, i.e. it kills the value left in that
 * register by a predecessor block of this block.
 */
static void
compute_local_ud(struct block *b)
{
	struct slist *s;
	atomset def = 0, use = 0, kill = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				kill |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (BPF_CLASS(b->s.code) == BPF_JMP) {
		/*
		 * XXX - what about RET?
		 */
		atom = atomuse(&b->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
	}

	b->def = def;
	b->kill = kill;
	b->in_use = use;
}
Example #11
static void
make_marks(struct block *p)
{
	if (!isMarked(p)) {
		Mark(p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(JT(p));
			make_marks(JF(p));
		}
	}
}
Example #12
/*
 * emit mov %<sreg>, %<dreg>
 */
static void
emit_mov_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg)
{
	const uint8_t ops = 0x89;

	/* if operands are 32-bit, then it can be used to clear upper 32-bit */
	if (sreg != dreg || BPF_CLASS(op) == BPF_ALU) {
		emit_rex(st, op, sreg, dreg);
		emit_bytes(st, &ops, sizeof(ops));
		emit_modregrm(st, MOD_DIRECT, sreg, dreg);
	}
}
Example #13
static void
opt_root(struct block **b)
{
	struct slist *tmp, *s;

	s = (*b)->stmts;
	(*b)->stmts = 0;
	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	tmp = (*b)->stmts;
	if (tmp != 0)
		sappend(s, tmp);
	(*b)->stmts = s;

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
}
Example #14
/*
 *  this method explores the existing graph and counts the number of NON-TERMINAL nodes needed to flatten the graph
 */
unsigned int countNoTerminalNodes(struct bpf_program * filter, int i)
{
    if(BPF_CLASS(filter->bf_insns[i].code) == BPF_JMP)
    {
        if(BPF_OP(filter->bf_insns[i].code) == BPF_JA)
        {
            return 1 + countNoTerminalNodes(filter,filter->bf_insns[i].k+i+1);
        }
        else
        {
            return 1 + countNoTerminalNodes(filter,filter->bf_insns[i].jt+i+1) 
            + countNoTerminalNodes(filter,filter->bf_insns[i].jf+i+1);
        }
    }
    else if(BPF_CLASS(filter->bf_insns[i].code) != BPF_RET)
    {
        return 1 + countNoTerminalNodes(filter,i+1);
    }
    
    /* terminal nodes are not counted here */
    
    return 0;
}
Example #15
/*
 *  this method explores the existing graph and counts the number of TERMINAL nodes needed to flatten the graph
 */
unsigned int countTerminalNodes(struct bpf_program * filter)
{
    unsigned int ret = 0, i;
    
    for(i=0;i<filter->bf_len;i++)
    {
        if(  BPF_CLASS(filter->bf_insns[i].code) == BPF_RET )
        {
            ret += 1;
        }
    }
    
    return ret;
}
Example #16
/*
 * Scan the filter program and find possible optimization.
 */
static int
bpf_jit_optimize(struct bpf_insn *prog, u_int nins)
{
	int flags;
	u_int i;

	/* Do we return immediately? */
	if (BPF_CLASS(prog[0].code) == BPF_RET)
		return (BPF_JIT_FRET);

	for (flags = 0, i = 0; i < nins; i++) {
		switch (prog[i].code) {
		case BPF_LD|BPF_W|BPF_ABS:
		case BPF_LD|BPF_H|BPF_ABS:
		case BPF_LD|BPF_B|BPF_ABS:
		case BPF_LD|BPF_W|BPF_IND:
		case BPF_LD|BPF_H|BPF_IND:
		case BPF_LD|BPF_B|BPF_IND:
		case BPF_LDX|BPF_MSH|BPF_B:
			flags |= BPF_JIT_FPKT;
			break;
		case BPF_LD|BPF_MEM:
		case BPF_LDX|BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			flags |= BPF_JIT_FMEM;
			break;
		case BPF_LD|BPF_W|BPF_LEN:
		case BPF_LDX|BPF_W|BPF_LEN:
			flags |= BPF_JIT_FLEN;
			break;
		case BPF_JMP|BPF_JA:
		case BPF_JMP|BPF_JGT|BPF_K:
		case BPF_JMP|BPF_JGE|BPF_K:
		case BPF_JMP|BPF_JEQ|BPF_K:
		case BPF_JMP|BPF_JSET|BPF_K:
		case BPF_JMP|BPF_JGT|BPF_X:
		case BPF_JMP|BPF_JGE|BPF_X:
		case BPF_JMP|BPF_JEQ|BPF_X:
		case BPF_JMP|BPF_JSET|BPF_X:
			flags |= BPF_JIT_FJMP;
			break;
		}
		if (flags == BPF_JIT_FLAG_ALL)
			break;
	}

	return (flags);
}
Example #17
/*
 * emit mov <imm>, %<dreg>
 */
static void
emit_mov_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm)
{
	const uint8_t ops = 0xC7;

	if (imm == 0) {
		/* replace 'mov 0, %<dst>' with 'xor %<dst>, %<dst>' */
		op = BPF_CLASS(op) | BPF_XOR | BPF_X;
		emit_alu_reg(st, op, dreg, dreg);
		return;
	}

	emit_rex(st, op, 0, dreg);
	emit_bytes(st, &ops, sizeof(ops));
	emit_modregrm(st, MOD_DIRECT, 0, dreg);
	emit_imm(st, imm, sizeof(imm));
}
Example #18
/*
 * emit one of:
 *   mov %<sreg>, <ofs>(%<dreg>)
 *   mov <imm>, <ofs>(%<dreg>)
 */
static void
emit_st_common(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
	uint32_t dreg, uint32_t imm, int32_t ofs)
{
	uint32_t mods, imsz, opsz, opx;
	const uint8_t prfx16 = 0x66;

	/* 8 bit instruction opcodes */
	static const uint8_t op8[] = {0xC6, 0x88};

	/* 16/32/64 bit instruction opcodes */
	static const uint8_t ops[] = {0xC7, 0x89};

	/* does the instruction take an immediate value or a src reg? */
	opx = (BPF_CLASS(op) == BPF_STX);

	opsz = BPF_SIZE(op);
	if (opsz == BPF_H)
		emit_bytes(st, &prfx16, sizeof(prfx16));

	emit_rex(st, op, sreg, dreg);

	if (opsz == BPF_B)
		emit_bytes(st, &op8[opx], sizeof(op8[opx]));
	else
		emit_bytes(st, &ops[opx], sizeof(ops[opx]));

	imsz = imm_size(ofs);
	mods = (imsz == 1) ? MOD_IDISP8 : MOD_IDISP32;

	emit_modregrm(st, mods, sreg, dreg);

	if (dreg == RSP || dreg == R12)
		emit_sib(st, SIB_SCALE_1, dreg, dreg);

	emit_imm(st, ofs, imsz);

	if (opx == 0) {
		imsz = RTE_MIN(bpf_size(opsz), sizeof(imm));
		emit_imm(st, imm, imsz);
	}
}
Example #19
static void
decode_bpf_stmt(const struct bpf_filter *filter)
{
#ifdef HAVE_LINUX_FILTER_H
	tprints("BPF_STMT(");
	decode_bpf_code(filter->code);
	tprints(", ");
	if (BPF_CLASS(filter->code) == BPF_RET) {
		unsigned int action = SECCOMP_RET_ACTION & filter->k;
		unsigned int data = filter->k & ~action;

		printxval(seccomp_ret_action, action, "SECCOMP_RET_???");
		if (data)
			tprintf(" | %#x)", data);
		else
			tprints(")");
	} else {
		tprintf("%#x)", filter->k);
	}
#else
	tprintf("BPF_STMT(%#x, %#x)", filter->code, filter->k);
#endif /* HAVE_LINUX_FILTER_H */
}
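For RET statements, the k value the decoder splits apart packs a seccomp action in the high bits and 16 bits of data in the low bits. Below is a hedged sketch of building one such value with the standard <linux/seccomp.h> constants; the helper name is illustrative.

#include <linux/seccomp.h>
#include <errno.h>

/* Pack "return EPERM" as a seccomp filter return value: action in the high
 * bits, errno in the SECCOMP_RET_DATA low bits. decode_bpf_stmt() above
 * performs the reverse split when printing. */
static unsigned int ret_errno_eperm(void)
{
	return SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA);
}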
Example #20
void innerTraversal(const char * pre,struct filter_item * i, int level)
{
    struct filter_node * node_tmp = NULL;
    int j;
    
    if(i == NULL)
    {
        return;
    }
    
    for(j = 0;j< level;j++)
    {
        printf(" ");
    }
    printf("%s",pre);
    printf("(%d) fnode (code:%u - k:%u) \n",level, i->code, i->k);
    
    if(BPF_CLASS(i->code) != BPF_RET)
    {
        node_tmp = i->next_child_t;
        while(node_tmp != NULL)
        {
            if(i->next_child_f == NULL)
                innerTraversal("",node_tmp->item, level+1);
            else
                innerTraversal("(t):",node_tmp->item, level+1);
            node_tmp = node_tmp->next;
        }

        node_tmp = i->next_child_f;
        while(node_tmp != NULL)
        {
            innerTraversal("(f):",node_tmp->item, level+1);
            node_tmp = node_tmp->next;
        }
    }
}
Example #21
/*
 * Return the register number that is used by s.  If A and X are both
 * used, return AX_ATOM.  If no register is used, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(struct stmt *s)
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}
Example #22
static int add_monitor_filter(int s)
{
    int idx;

    /* rewrite all PASS/FAIL jump offsets */
    for (idx = 0; idx < msock_filter.len; idx++)
    {
        struct sock_filter *insn = &msock_filter_insns[idx];

        if (BPF_CLASS(insn->code) == BPF_JMP) {
            if (insn->code == (BPF_JMP | BPF_JA)) {
                if (insn->k == PASS)
                    insn->k = msock_filter.len - idx - 2;
                else if (insn->k == FAIL)
                    insn->k = msock_filter.len - idx - 3;
            }

            if (insn->jt == PASS)
                insn->jt = msock_filter.len - idx - 2;
            else if (insn->jt == FAIL)
                insn->jt = msock_filter.len - idx - 3;

            if (insn->jf == PASS)
                insn->jf = msock_filter.len - idx - 2;
            else if (insn->jf == FAIL)
                insn->jf = msock_filter.len - idx - 3;
        }
    }

    if (setsockopt(s, SOL_SOCKET, SO_ATTACH_FILTER,
        &msock_filter, sizeof(msock_filter))) {
        fprintf(stderr, "nl80211: setsockopt(SO_ATTACH_FILTER) failed: %s\n", strerror(errno));
        return -1;
    }

    return 0;
}
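The PASS/FAIL rewriting above assumes the filter table was authored with placeholder jump targets and that the last two instructions are the drop and accept returns. The following is a minimal, hypothetical sketch of that idiom as a standalone illustration; the PASS/FAIL values and the filter contents are illustrative, not the actual msock_filter table.

#include <linux/filter.h>

/* Hypothetical placeholder values; the real code defines its own. */
#define PASS 255
#define FAIL 254

static struct sock_filter msock_filter_insns[] = {
	/* accept frames whose first byte is 0x00, drop everything else */
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x00, PASS, FAIL),
	/* trailing targets the placeholders are rewritten to reach */
	BPF_STMT(BPF_RET | BPF_K, 0),           /* FAIL: drop   (index len - 2) */
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),  /* PASS: accept (index len - 1) */
};

static struct sock_fprog msock_filter = {
	.len = sizeof(msock_filter_insns) / sizeof(msock_filter_insns[0]),
	.filter = msock_filter_insns,
};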
Example #23
/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(struct stmt *s)
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}
Example #24
/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
static int
convert_code_r(struct block *p)
{
	struct bpf_insn *dst;
	struct slist *src;
	u_int slen;
	u_int off;
	int extrajmps;		/* number of extra jumps inserted */
	struct slist **offset = NULL;

	if (p == 0 || isMarked(p))
		return (1);
	Mark(p);

	if (convert_code_r(JF(p)) == 0)
		return (0);
	if (convert_code_r(JT(p)) == 0)
		return (0);

	slen = slength(p->stmts);
	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = dst - fstart;

	/* generate offset[] for convenience  */
	if (slen) {
		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
		if (!offset) {
			bpf_error("not enough core");
			/*NOTREACHED*/
		}
	}
	src = p->stmts;
	for (off = 0; off < slen && src; off++) {
#if 0
		printf("off=%d src=%x\n", off, src);
#endif
		offset[off] = src;
		src = src->next;
	}

	off = 0;
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;

		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
#if 0
			if (src->s.jt || src->s.jf) {
				bpf_error("illegal jmp destination");
				/*NOTREACHED*/
			}
#endif
			goto filled;
		}
		if (off == slen - 2)	/*???*/
			goto filled;

	    {
		u_int i;
		int jt, jf;
static const char ljerr[] = "%s for block-local relative jump: off=%d";

#if 0
		printf("code=%x off=%d %x %x\n", src->s.code,
			off, src->s.jt, src->s.jf);
#endif

		if (!src->s.jt || !src->s.jf) {
			bpf_error(ljerr, "no jmp destination", off);
			/*NOTREACHED*/
		}

		jt = jf = 0;
		for (i = 0; i < slen; i++) {
			if (offset[i] == src->s.jt) {
				if (jt) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}

				dst->jt = i - off - 1;
				jt++;
			}
			if (offset[i] == src->s.jf) {
				if (jf) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				dst->jf = i - off - 1;
				jf++;
			}
		}
		if (!jt || !jf) {
			bpf_error(ljerr, "no destination found", off);
			/*NOTREACHED*/
		}
	    }
filled:
		++dst;
		++off;
	}
	if (offset)
		free(offset);

#ifdef BDEBUG
	bids[dst - fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		extrajmps = 0;
		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
		    /* offset too large for branch, must add a jump */
		    if (p->longjt == 0) {
		    	/* mark this instruction and retry */
			p->longjt++;
			return(0);
		    }
		    /* branch if T to following jump */
		    dst->jt = extrajmps;
		    extrajmps++;
		    dst[extrajmps].code = BPF_JMP|BPF_JA;
		    dst[extrajmps].k = off - extrajmps;
		}
		else
		    dst->jt = off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
		    /* offset too large for branch, must add a jump */
		    if (p->longjf == 0) {
		    	/* mark this instruction and retry */
			p->longjf++;
			return(0);
		    }
		    /* branch if F to following jump */
		    /* if two jumps are inserted, F goes to second one */
		    dst->jf = extrajmps;
		    extrajmps++;
		    dst[extrajmps].code = BPF_JMP|BPF_JA;
		    dst[extrajmps].k = off - extrajmps;
		}
		else
		    dst->jf = off;
	}
	return (1);
}
Example #25
static char *bpf_dump(const struct sock_filter bpf, int n)
{
	int v;
	const char *fmt, *op;
	static char image[256];
	char operand[64];

	v = bpf.k;
	switch (bpf.code) {
	default:
		op = "unimp";
		fmt = "0x%x";
		v = bpf.code;
		break;
	case BPF_RET | BPF_K:
		op = "ret";
		fmt = "#0x%x";
		break;
	case BPF_RET | BPF_A:
		op = "ret";
		fmt = "";
		break;
	case BPF_LD | BPF_W | BPF_ABS:
		op = "ld";
		fmt = "[%d]";
		break;
	case BPF_LD | BPF_H | BPF_ABS:
		op = "ldh";
		fmt = "[%d]";
		break;
	case BPF_LD | BPF_B | BPF_ABS:
		op = "ldb";
		fmt = "[%d]";
		break;
	case BPF_LD | BPF_W | BPF_LEN:
		op = "ld";
		fmt = "#pktlen";
		break;
	case BPF_LD | BPF_W | BPF_IND:
		op = "ld";
		fmt = "[x + %d]";
		break;
	case BPF_LD | BPF_H | BPF_IND:
		op = "ldh";
		fmt = "[x + %d]";
		break;
	case BPF_LD | BPF_B | BPF_IND:
		op = "ldb";
		fmt = "[x + %d]";
		break;
	case BPF_LD | BPF_IMM:
		op = "ld";
		fmt = "#0x%x";
		break;
	case BPF_LDX | BPF_IMM:
		op = "ldx";
		fmt = "#0x%x";
		break;
	case BPF_LDX | BPF_MSH | BPF_B:
		op = "ldxb";
		fmt = "4*([%d]&0xf)";
		break;
	case BPF_LD | BPF_MEM:
		op = "ld";
		fmt = "M[%d]";
		break;
	case BPF_LDX | BPF_MEM:
		op = "ldx";
		fmt = "M[%d]";
		break;
	case BPF_ST:
		op = "st";
		fmt = "M[%d]";
		break;
	case BPF_STX:
		op = "stx";
		fmt = "M[%d]";
		break;
	case BPF_JMP | BPF_JA:
		op = "ja";
		fmt = "%d";
		v = n + 1 + bpf.k;
		break;
	case BPF_JMP | BPF_JGT | BPF_K:
		op = "jgt";
		fmt = "#0x%x";
		break;
	case BPF_JMP | BPF_JGE | BPF_K:
		op = "jge";
		fmt = "#0x%x";
		break;
	case BPF_JMP | BPF_JEQ | BPF_K:
		op = "jeq";
		fmt = "#0x%x";
		break;
	case BPF_JMP | BPF_JSET | BPF_K:
		op = "jset";
		fmt = "#0x%x";
		break;
	case BPF_JMP | BPF_JGT | BPF_X:
		op = "jgt";
		fmt = "x";
		break;
	case BPF_JMP | BPF_JGE | BPF_X:
		op = "jge";
		fmt = "x";
		break;
	case BPF_JMP | BPF_JEQ | BPF_X:
		op = "jeq";
		fmt = "x";
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		op = "jset";
		fmt = "x";
		break;
	case BPF_ALU | BPF_ADD | BPF_X:
		op = "add";
		fmt = "x";
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
		op = "sub";
		fmt = "x";
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
		op = "mul";
		fmt = "x";
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
		op = "div";
		fmt = "x";
		break;
	case BPF_ALU | BPF_AND | BPF_X:
		op = "and";
		fmt = "x";
		break;
	case BPF_ALU | BPF_OR | BPF_X:
		op = "or";
		fmt = "x";
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
		op = "lsh";
		fmt = "x";
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
		op = "rsh";
		fmt = "x";
		break;
	case BPF_ALU | BPF_ADD | BPF_K:
		op = "add";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
		op = "sub";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
		op = "mul";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
		op = "div";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_AND | BPF_K:
		op = "and";
		fmt = "#0x%x";
		break;
	case BPF_ALU | BPF_OR | BPF_K:
		op = "or";
		fmt = "#0x%x";
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
		op = "lsh";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
		op = "rsh";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_NEG:
		op = "neg";
		fmt = "";
		break;
	case BPF_MISC | BPF_TAX:
		op = "tax";
		fmt = "";
		break;
	case BPF_MISC | BPF_TXA:
		op = "txa";
		fmt = "";
		break;
	}

	slprintf(operand, sizeof(operand), fmt, v);
	slprintf(image, sizeof(image),
		 (BPF_CLASS(bpf.code) == BPF_JMP &&
		  BPF_OP(bpf.code) != BPF_JA) ?
		 " L%d: %s %s, L%d, L%d" : " L%d: %s %s",
		 n, op, operand, n + 1 + bpf.jt, n + 1 + bpf.jf);

	return image;
}
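A typical use of a dumper with this signature is to walk a compiled program and print one line per instruction. Below is a short sketch that assumes the bpf_dump() above and a populated struct sock_fprog; the helper name is illustrative.

#include <linux/filter.h>
#include <stdio.h>

/* Print a compiled filter one instruction per line, using bpf_dump() above. */
static void dump_filter(const struct sock_fprog *prog)
{
	int i;

	for (i = 0; i < prog->len; i++)
		puts(bpf_dump(prog->filter[i], i));
}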
Example #26
int bpf_validate(const struct sock_fprog *bpf)
{
	uint32_t i, from;
	const struct sock_filter *p;

	if (!bpf)
		return 0;
	if (bpf->len < 1)
		return 0;

	for (i = 0; i < bpf->len; ++i) {
		p = &bpf->filter[i];
		switch (BPF_CLASS(p->code)) {
			/*
			 * Check that memory operations use valid addresses.
			 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_RVAL(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= bpf->len)
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= bpf->len ||
				    from + p->jf >= bpf->len)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return 0;
		}
	}

	return BPF_CLASS(bpf->filter[bpf->len - 1].code) == BPF_RET;
}
Example #27
static void
opt_peep(struct block *b)
{
	struct slist *s;
	struct slist *next, *last;
	int val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	for (/*empty*/; /*empty*/; s = next) {
		/*
		 * Skip over nops.
		 */
		s = this_op(s);
		if (s == 0)
			break;	/* nothing left in the block */

		/*
		 * Find the next real instruction after that one
		 * (skipping nops).
		 */
		next = this_op(s->next);
		if (next == 0)
			break;	/* no next instruction */
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx  #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				continue;

			/*
			 * Check that the instruction following the ldi
			 * is an addx, or it's an ldxms with an addx
			 * following it (with 0 or more nops between the
			 * ldxms and addx).
			 */
			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				continue;

			/*
			 * Check that a tax follows that (with 0 or more
			 * nops between them).
			 */
			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				continue;

			/*
			 * Check that an ild follows that (with 0 or more
			 * nops between them).
			 */
			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				continue;
			/*
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 * XXX We need to check that X is not
			 * subsequently used, because we want to change
			 * what'll be in it after this sequence.
			 *
			 * We know we can eliminate the accumulator
			 * modifications earlier in the sequence since
			 * it is defined by the last stmt of this sequence
			 * (i.e., the last statement of the sequence loads
			 * a value into the accumulator, so we can eliminate
			 * earlier operations on the accumulator).
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			done = 0;
		}
	}
	/*
	 * If the comparison at the end of a block is an equality
	 * comparison against a constant, and nobody uses the value
	 * we leave in the A register at the end of a block, and
	 * the operation preceding the comparison is an arithmetic
	 * operation, we can sometime optimize it away.
	 */
	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
	    	/*
	    	 * We can optimize away certain subtractions of the
	    	 * X register.
	    	 */
		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
			val = b->val[X_ATOM];
			if (vmap[val].is_const) {
				/*
				 * If we have a subtract to do a comparison,
				 * and the X register is a known constant,
				 * we can merge this value into the
				 * comparison:
				 *
				 * sub x  ->	nop
				 * jeq #y	jeq #(x+y)
				 */
				b->s.k += vmap[val].const_val;
				last->s.code = NOP;
				done = 0;
			} else if (b->s.k == 0) {
				/*
				 * If the X register isn't a constant,
				 * and the comparison in the test is
				 * against 0, we can compare with the
				 * X register, instead:
				 *
				 * sub x  ->	nop
				 * jeq #0	jeq x
				 */
				last->s.code = NOP;
				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
				done = 0;
			}
		}
		/*
		 * Likewise, a constant subtract can be simplified:
		 *
		 * sub #x ->	nop
		 * jeq #y ->	jeq #(x+y)
		 */
		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
			last->s.code = NOP;
			b->s.k += last->s.k;
			done = 0;
		}
		/*
		 * And, similarly, a constant AND can be simplified
		 * if we're testing against 0, i.e.:
		 *
		 * and #k	nop
		 * jeq #0  ->	jset #k
		 */
		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
		    b->s.k == 0) {
			b->s.k = last->s.k;
			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
			last->s.code = NOP;
			done = 0;
			opt_not(b);
		}
	}
	/*
	 * jset #0        ->   never
	 * jset #ffffffff ->   always
	 */
	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
		if (b->s.k == 0)
			JT(b) = JF(b);
		if (b->s.k == (int)0xffffffff)
			JF(b) = JT(b);
	}
	/*
	 * If we're comparing against the index register, and the index
	 * register is a known constant, we can just compare against that
	 * constant.
	 */
	val = b->val[X_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
		bpf_int32 v = vmap[val].const_val;
		b->s.code &= ~BPF_X;
		b->s.k = v;
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_int32 v = vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = (unsigned)v > (unsigned)b->s.k;
			break;

		case BPF_JGE:
			v = (unsigned)v >= (unsigned)b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b))
			done = 0;
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}
Example #28
static void update_regs_access(cs_struct *ud, cs_detail *detail,
		bpf_insn insn_id, unsigned int opcode)
{
	if (insn_id == BPF_INS_INVALID)
		return;
#define PUSH_READ(r) do { \
		detail->regs_read[detail->regs_read_count] = r; \
		detail->regs_read_count++; \
	} while (0)
#define PUSH_WRITE(r) do { \
		detail->regs_write[detail->regs_write_count] = r; \
		detail->regs_write_count++; \
	} while (0)
	/*
	 * In eBPF mode, only these instructions have implicit register accesses:
	 * - ld{w,h,b,dw} * // w: r0
	 * - exit // r: r0
	 */
	if (EBPF_MODE(ud)) {
		switch (insn_id) {
		default:
			break;
		case BPF_INS_LDW:
		case BPF_INS_LDH:
		case BPF_INS_LDB:
		case BPF_INS_LDDW:
			PUSH_WRITE(BPF_REG_R0);
			break;
		case BPF_INS_EXIT:
			PUSH_READ(BPF_REG_R0);
			break;
		}
		return;
	}

	/* cBPF mode */
	switch (BPF_CLASS(opcode)) {
	default:
		break;
	case BPF_CLASS_LD:
		PUSH_WRITE(BPF_REG_A);
		break;
	case BPF_CLASS_LDX:
		PUSH_WRITE(BPF_REG_X);
		break;
	case BPF_CLASS_ST:
		PUSH_READ(BPF_REG_A);
		break;
	case BPF_CLASS_STX:
		PUSH_READ(BPF_REG_X);
		break;
	case BPF_CLASS_ALU:
		PUSH_READ(BPF_REG_A);
		PUSH_WRITE(BPF_REG_A);
		break;
	case BPF_CLASS_JMP:
		if (insn_id != BPF_INS_JMP) // except the unconditional jump
			PUSH_READ(BPF_REG_A);
		break;
	/* case BPF_CLASS_RET: */
	case BPF_CLASS_MISC:
		if (insn_id == BPF_INS_TAX) {
			PUSH_READ(BPF_REG_A);
			PUSH_WRITE(BPF_REG_X);
		}
		else {
			PUSH_READ(BPF_REG_X);
			PUSH_WRITE(BPF_REG_A);
		}
		break;
	}
}
Example #29
/*
 * 1. Convert opcode(id) to BPF_INS_*
 * 2. Set regs_read/regs_write/groups
 */
void BPF_get_insn_id(cs_struct *ud, cs_insn *insn, unsigned int opcode)
{
	// No need to care about the mode (cBPF or eBPF) since all checks have been
	// done in BPF_getInstruction; we can simply map opcode to BPF_INS_*.
	cs_detail *detail;
	bpf_insn id = BPF_INS_INVALID;
	bpf_insn_group grp;

	detail = insn->detail;
#ifndef CAPSTONE_DIET
 #define PUSH_GROUP(grp) do { \
		if (detail) { \
			detail->groups[detail->groups_count] = grp; \
			detail->groups_count++; \
		} \
	} while(0)
#else
 #define PUSH_GROUP
#endif

	switch (BPF_CLASS(opcode)) {
	default:	// will never happen
		break;
	case BPF_CLASS_LD:
	case BPF_CLASS_LDX:
		id = op2insn_ld(opcode);
		PUSH_GROUP(BPF_GRP_LOAD);
		break;
	case BPF_CLASS_ST:
	case BPF_CLASS_STX:
		id = op2insn_st(opcode);
		PUSH_GROUP(BPF_GRP_STORE);
		break;
	case BPF_CLASS_ALU:
		id = op2insn_alu(opcode);
		PUSH_GROUP(BPF_GRP_ALU);
		break;
	case BPF_CLASS_JMP:
		grp = BPF_GRP_JUMP;
		id = op2insn_jmp(opcode);
		if (id == BPF_INS_CALL)
			grp = BPF_GRP_CALL;
		else if (id == BPF_INS_EXIT)
			grp = BPF_GRP_RETURN;
		PUSH_GROUP(grp);
		break;
	case BPF_CLASS_RET:
		id = BPF_INS_RET;
		PUSH_GROUP(BPF_GRP_RETURN);
		break;
	// BPF_CLASS_MISC and BPF_CLASS_ALU64 have exactly same value
	case BPF_CLASS_MISC:
	/* case BPF_CLASS_ALU64: */
		if (EBPF_MODE(ud)) {
			// ALU64 in eBPF
			id = op2insn_alu(opcode);
			PUSH_GROUP(BPF_GRP_ALU);
		}
		else {
			if (BPF_MISCOP(opcode) == BPF_MISCOP_TXA)
				id = BPF_INS_TXA;
			else
				id = BPF_INS_TAX;
			PUSH_GROUP(BPF_GRP_MISC);
		}
		break;
	}

	insn->id = id;
#undef PUSH_GROUP

#ifndef CAPSTONE_DIET
	if (detail) {
		update_regs_access(ud, detail, id, opcode);
	}
#endif
}
Example #30
static void
opt_blk(struct block *b, int do_stmts)
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_int32 aval, xval;

#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 */
	p = b->in_edges;
	if (p == 0) {
		/*
		 * We have no predecessors, so everything is undefined
		 * upon entry to this block.
		 */
		memset((char *)b->val, 0, sizeof(b->val));
	} else {
		/*
		 * Inherit values from our predecessors.
		 *
		 * First, get the values from the predecessor along the
		 * first edge leading to this node.
		 */
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		/*
		 * Now look at all the other nodes leading to this node.
		 * If, for the predecessor along that edge, a register
		 * has a different value from the one we have (i.e.,
		 * control paths are merging, and the merging paths
		 * assign different values to that register), give the
		 * register the undefined value of 0.
		 */
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	xval = b->val[X_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(&s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	if (do_stmts &&
	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval &&
	      xval != 0 && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			done = 0;
		}
	} else {
		opt_peep(b);
		opt_deadstores(b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}