Example #1
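tcf_bpf() is the act_bpf traffic-control action handler from net/sched/act_bpf.c: it updates timestamps and byte stats under the per-action tcf_lock, runs the attached program under rcu_read_lock() (pushing the MAC header back on ingress so the program sees the same layout as on egress), and then maps the program's return code to a TC verdict.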
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
		   struct tcf_result *res)
{
	struct tcf_bpf *prog = act->priv;
	int action, filter_res;
	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return TC_ACT_UNSPEC;

	spin_lock(&prog->tcf_lock);

	prog->tcf_tm.lastuse = jiffies;
	bstats_update(&prog->tcf_bstats, skb);

	/* Needed here for accessing maps. */
	rcu_read_lock();
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		filter_res = BPF_PROG_RUN(prog->filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		filter_res = BPF_PROG_RUN(prog->filter, skb);
	}
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * As in cls_bpf, if filter_res == -1 we use the default
	 * action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		prog->tcf_qstats.drops++;
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	spin_unlock(&prog->tcf_lock);
	return action;
}
Example #2
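A later revision of the same handler: the spinlock is gone, the program is fetched with rcu_dereference(), and the stats become per-CPU (bstats_cpu_update(), qstats_drop_inc()), so the fast path is lock-free. TC_ACT_REDIRECT joins the set of recognized verdicts.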
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
		   struct tcf_result *res)
{
	struct tcf_bpf *prog = act->priv;
	struct bpf_prog *filter;
	int action, filter_res;
	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return TC_ACT_UNSPEC;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * As in cls_bpf, if filter_res == -1 we use the default
	 * action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}
Example #3
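seccomp_run_filters() from kernel/seccomp.c walks the task's filter chain, runs every program against the seccomp_data, keeps the numerically lowest action as the verdict, and records the filter that produced it through @match. A NULL chain fails closed with SECCOMP_RET_KILL_PROCESS.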
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			READ_ONCE(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL_PROCESS;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

		if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
			ret = cur_ret;
			*match = f;
		}
	}
	return ret;
}
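A note on the precedence comparison: ACTION_ONLY() masks the return value with SECCOMP_RET_ACTION_FULL and casts it to s32, so SECCOMP_RET_KILL_PROCESS (0x80000000) sorts below every other action, and SECCOMP_RET_ERRNO (0x00050000) beats SECCOMP_RET_ALLOW (0x7fff0000): the most restrictive filter in the chain always wins.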
Example #4
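trace_call_bpf() from kernel/trace/bpf_trace.c is the entry point kprobe handlers use: it refuses NMI context, guards against nested BPF execution on the same CPU via the per-CPU bpf_prog_active counter, and runs the program under RCU with preemption disabled.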
/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer, which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
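Note the asymmetry in the bail-out paths: recursion returns 0 (the event is filtered out), while NMI context returns 1 (the event is kept), matching the return-code convention described in the comment above.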
Example #5
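An earlier seccomp_run_filters(), before the @match parameter and SECCOMP_RET_KILL_PROCESS were introduced: the same chain walk, but with lockless_dereference() instead of READ_ONCE() and an open-coded SECCOMP_RET_ACTION comparison.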
/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(struct seccomp_data *sd)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			lockless_dereference(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd);

		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
			ret = cur_ret;
	}
	return ret;
}
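For reference, here is a hedged userspace sketch of how the filter chain these two functions walk gets installed: a classic BPF program matched against struct seccomp_data and attached with prctl(). The getpid-blocking policy and the install_filter() helper are invented for illustration; a real filter should also validate seccomp_data->arch before trusting the syscall number.

#include <errno.h>
#include <stddef.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_filter(void)
{
	struct sock_filter insns[] = {
		/* Load the syscall number from struct seccomp_data. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* Fail getpid() with EPERM; allow everything else. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K,
			 SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	/* Required so an unprivileged task may install a filter. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;

	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}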
Example #6
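bpf_mt() is the xt_bpf match from net/netfilter/xt_bpf.c: the program's u32 return value is converted to bool, so any nonzero verdict counts as a packet match.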
static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_bpf_info *info = par->matchinfo;

	return BPF_PROG_RUN(info->filter, skb);
}
Example #7
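A minimal wrapper that simply forwards a caller-supplied context to BPF_PROG_RUN and returns the program's verdict.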
unsigned int filter(struct bpf_prog *prog1, struct bpf_context *ctx)
{
	/* BPF_PROG_RUN takes a void * context and returns a u32 verdict. */
	return BPF_PROG_RUN(prog1, ctx);
}
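To round out the examples, a hedged kernel-side sketch of where a struct bpf_prog suitable for BPF_PROG_RUN can come from: bpf_prog_create() turns a classic BPF filter into a runnable program. The accept-all filter and the run_accept_all() helper are invented for illustration, and error handling is minimal.

#include <linux/filter.h>
#include <linux/skbuff.h>

/* Hypothetical helper: build an "accept everything" classic BPF
 * filter, run it once against an skb, and tear it down.
 */
static unsigned int run_accept_all(struct sk_buff *skb)
{
	struct sock_filter insns[] = {
		/* Return a large length: accept the whole packet. */
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
	};
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};
	struct bpf_prog *prog;
	unsigned int verdict;

	/* Translates classic BPF to eBPF and JITs it if enabled;
	 * the instructions are copied, so the stack array is fine.
	 */
	if (bpf_prog_create(&prog, &fprog))
		return 0;

	rcu_read_lock();
	verdict = BPF_PROG_RUN(prog, skb);
	rcu_read_unlock();

	bpf_prog_destroy(prog);
	return verdict;
}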