Example #1
static void print_stacks(void)
{
	struct key_t key = {}, next_key;
	__u64 value;
	__u32 stackid = 0, next_id;
	int fd = map_fd[0], stack_map = map_fd[1];

	sys_read_seen = sys_write_seen = false;
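	/* drain the counts map: print and delete every entry as we walk it */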
	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
		bpf_map_lookup_elem(fd, &next_key, &value);
		print_stack(&next_key, value);
		bpf_map_delete_elem(fd, &next_key);
		key = next_key;
	}

	if (!sys_read_seen || !sys_write_seen) {
		printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
		int_exit(0);
	}

	/* clear stack map */
	while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
		bpf_map_delete_elem(stack_map, &next_id);
		stackid = next_id;
	}
}
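A note on the iteration idiom above: starting from a zeroed key only works when no live element actually uses the all-zero key, since bpf_map_get_next_key() returns the key *after* the one passed in (or the first key, if the passed key is absent). Passing NULL instead asks the kernel for the first key unconditionally. A minimal drain loop in that style, assuming an already-created hash map fd with __u64 keys and values:

#include <bpf/bpf.h>
#include <linux/types.h>

/* drain every entry from a hash map with __u64 keys and values */
static void drain_map(int fd)
{
	__u64 key, next_key, value;
	void *prev = NULL;	/* NULL: ask the kernel for the first key */

	while (bpf_map_get_next_key(fd, prev, &next_key) == 0) {
		if (bpf_map_lookup_elem(fd, &next_key, &value) == 0)
			bpf_map_delete_elem(fd, &next_key);
		key = next_key;
		prev = &key;
	}
}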
Example #2
/* Test deletion */
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
				     3 * tgt_free);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* fill the map with keys 1..2*tgt_free */
	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* key 1 is already present, so a BPF_NOEXIST update must fail */
	key = 1;
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* look up keys 1..tgt_free to mark them recently used and mirror
	 * them into the expected map
	 */
	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* delete keys tgt_free+1..2*tgt_free; deleting the same key twice
	 * must fail the second time
	 */
	for (; key <= 2 * tgt_free; key++) {
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
		assert(bpf_map_delete_elem(lru_map_fd, &key));
	}

	/* insert 2*tgt_free fresh keys into both maps */
	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
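The paired asserts above lean on the update/delete error contract of BPF hash maps: an update with BPF_NOEXIST fails on a key that is already present, and a delete fails on a key that is absent. A minimal standalone illustration with libbpf (bpf_map_create needs libbpf >= 0.7 and enough privileges to create maps; this sketch is not part of the selftest):

#include <assert.h>
#include <errno.h>
#include <bpf/bpf.h>

int main(void)
{
	unsigned long long key = 1, value = 1234;
	int fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
				sizeof(key), sizeof(value), 2, NULL);

	assert(fd >= 0);

	/* first BPF_NOEXIST insert succeeds; a second one fails: EEXIST */
	assert(!bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST));
	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) &&
	       errno == EEXIST);

	/* first delete succeeds; deleting a missing key fails: ENOENT */
	assert(!bpf_map_delete_elem(fd, &key));
	assert(bpf_map_delete_elem(fd, &key) && errno == ENOENT);

	return 0;
}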
Example #3
static void test_map_in_map(void)
{
	struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 };
	uint32_t result_key = 0, port_key;
	int result, inline_result;
	int magic_result = 0xfaceb00c;
	int ret;
	int i;

	port_key = rand() & 0x00FF;
	populate_map(port_key, magic_result);

	in6.sin6_addr.s6_addr16[0] = 0xdead;
	in6.sin6_addr.s6_addr16[1] = 0xbeef;
	in6.sin6_port = port_key;

	for (i = 0; i < NR_TESTS; i++) {
		printf("%s: ", test_names[i]);

		in6.sin6_addr.s6_addr16[7] = i;
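		/* connect() on fd -1 is expected to fail with EBADF; it is
		 * issued only to enter the connect syscall and fire the
		 * probe that performs the map-in-map lookups
		 */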
		ret = connect(-1, (struct sockaddr *)&in6, sizeof(in6));
		assert(ret == -1 && errno == EBADF);

		ret = bpf_map_lookup_elem(REG_RESULT_H, &result_key, &result);
		assert(!ret);

		ret = bpf_map_lookup_elem(INLINE_RESULT_H, &result_key,
					  &inline_result);
		assert(!ret);

		if (result != magic_result || inline_result != magic_result) {
			printf("Error. result:%d inline_result:%d\n",
			       result, inline_result);
			exit(1);
		}

		bpf_map_delete_elem(REG_RESULT_H, &result_key);
		bpf_map_delete_elem(INLINE_RESULT_H, &result_key);

		printf("Pass\n");
	}
}
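populate_map() (not shown here) is assumed to store magic_result in an inner map and wire that inner map into an outer map-in-map keyed by port. A hedged sketch of that wiring with modern libbpf; populate_map_sketch and the map names are illustrative, not the sample's actual helpers:

#include <stdint.h>
#include <bpf/bpf.h>

static int populate_map_sketch(uint32_t port_key, int magic_result)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	uint32_t slot = 0;
	int inner_fd, outer_fd;

	/* inner map: a single slot holding the expected result */
	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
				  sizeof(uint32_t), sizeof(int), 1, NULL);
	if (inner_fd < 0)
		return -1;
	if (bpf_map_update_elem(inner_fd, &slot, &magic_result, BPF_ANY))
		return -1;

	/* the outer map needs an inner-map template fd at create time */
	opts.inner_map_fd = inner_fd;
	outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
				  sizeof(uint32_t), sizeof(int), 16, &opts);
	if (outer_fd < 0)
		return -1;

	/* the value written into a map-in-map is the inner map's fd */
	return bpf_map_update_elem(outer_fd, &port_key, &inner_fd, BPF_ANY);
}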
Example #4
int bpf_prog2(struct pt_regs *ctx)
{
	long rq = PT_REGS_PARM1(ctx);
	u64 *value, l, base;
	u32 index;

	value = bpf_map_lookup_elem(&my_map, &rq);
	if (!value)
		return 0;

	u64 cur_time = bpf_ktime_get_ns();
	u64 delta = cur_time - *value;

	bpf_map_delete_elem(&my_map, &rq);

	/* the lines below are computing index = log10(delta)*10
	 * using integer arithmetic
	 * index = 29 ~ 1 usec
	 * index = 59 ~ 1 msec
	 * index = 89 ~ 1 sec
	 * index = 99 ~ 10sec or more
	 * log10(x)*10 = log2(x)*10/log2(10) ~= log2(x)*3
	 */
	l = log2l(delta);
	base = 1ll << l;
	index = (l * 64 + (delta - base) * 64 / base) * 3 / 64;

	if (index >= SLOTS)
		index = SLOTS - 1;

	value = bpf_map_lookup_elem(&lat_map, &index);
	if (value)
		*value += 1;

	return 0;
}
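The index formula linearly interpolates the fractional part of log2(delta) between consecutive powers of two, then scales by 3 (close to 10/log2(10)). A small userspace spot-check of the arithmetic; ilog2() here stands in for the program's log2l() helper, assumed to compute floor(log2):

#include <stdio.h>
#include <stdint.h>

static uint64_t ilog2(uint64_t v)
{
	return 63 - __builtin_clzll(v);	/* floor(log2(v)), v > 0 */
}

int main(void)
{
	/* 1 usec, 1 msec, 1 sec, 10 sec, in nanoseconds */
	uint64_t deltas[] = { 1000ULL, 1000000ULL, 1000000000ULL,
			      10000000000ULL };
	int i;

	for (i = 0; i < 4; i++) {
		uint64_t delta = deltas[i];
		uint64_t l = ilog2(delta);
		uint64_t base = 1ULL << l;
		uint32_t index = (l * 64 + (delta - base) * 64 / base) * 3 / 64;

		printf("delta=%llu ns -> index=%u\n",
		       (unsigned long long)delta, index);
	}
	return 0;	/* prints 29, 59, 89, 99, matching the comment above */
}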
Example #5
int bpf_prog2(struct pt_regs *ctx)
{
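	/* the first kprobe argument (the struct request *) sits in %rdi on
	 * x86-64; PT_REGS_PARM1(ctx), as in the previous example, is the
	 * portable way to read it
	 */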
	long rq = ctx->di;
	struct request *req = (struct request *)ctx->di;
	u64 *value, l, base, cur_time, delta;
	u32 index;

	/* calculate latency */
	value = bpf_map_lookup_elem(&start_ts, &rq);
	if (!value)
		return 0;
	cur_time = bpf_ktime_get_ns();
	delta = cur_time - *value;
	bpf_map_delete_elem(&start_ts, &rq);

	/* using bpf_trace_printk() for DEBUG ONLY; limited to 3 args. */
	char fmt[] = "%d %x %d\n";
	bpf_trace_printk(fmt, sizeof(fmt),
	    _(req->__data_len),			/* bytes */
	    _(req->cmd_flags),			/* flags */
	    delta / 1000);			/* lat_us */
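	/* the formatted output appears in
	 * /sys/kernel/debug/tracing/trace_pipe
	 */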

	return 0;
}
Example #6
/* Size of the LRU map is 1.5 * tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Keys 1+tgt_free to tgt_free*3/2
 *      will be removed from the LRU because they have never
 *      been looked up and their ref bits are not set
 */
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* The per-cpu LRU list (i.e. each cpu has its own LRU
		 * list) does not have a local free list.  Hence,
		 * it only frees old nodes until it gets a free one
		 * from the LRU list.  Hence, this test does not apply
		 * to BPF_F_NO_COMMON_LRU
		 */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					map_size * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Any bpf_map_update_elem will first need to acquire a new node
	 * from the LRU.
	 *
	 * The local list is running out of free nodes.
	 * It gets them from the global LRU list, which tries to
	 * shrink the inactive list to get tgt_free
	 * number of free nodes.
	 *
	 * Hence, the oldest keys, 1 to tgt_free/2,
	 * are removed from the LRU list.
	 */
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
	} else {
		assert(bpf_map_update_elem(lru_map_fd, &key, value,
					   BPF_EXIST));
	}

	/* Re-insert 1 to tgt_free/2 again and do a lookup
	 * immediately.
	 */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		assert(bpf_map_lookup_elem(lru_map_fd, &key, value));
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	value[0] = 1234;

	/* Insert 1+tgt_free to tgt_free*3/2 */
	end_key = 1 + tgt_free + batch_size;
	for (key = 1 + tgt_free; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
Example #7
static int bpf_pipeline_setdel_rules(struct net_mat_rule *rule, bool set_rule)
{
	__u8 *key, *value, *tcache;
	int err;
	unsigned int i;

	/* Assert on these because we expect matchlib never to pass us
	 * ill-formed rules. The asserts are an interim measure: I'm using
	 * this to fuzz-test the middle layer and want hard errors in the
	 * lower layer when something goes wrong.
	 */
	assert(rule->table_id < BPF_MATCH_MAX_TABLES);

	/* The BPF backend uses a linear reference scheme where uid maps
	 * 1:1 with hw_rule_ids, so drop rules whose uid is greater than
	 * the map size.
	 *
	 * Should the middle layer catch this?
	 */
	printf("%s: rule %i max %i set:%s\n", __func__, rule->uid,
	       table_aux[rule->table_id].max_elem, set_rule ? "yes" : "no");
	assert(rule->uid < table_aux[rule->table_id].max_elem);
	/*
	if (rule->uid > table_aux[rule->table_id].max_elem) {
		MAT_LOG(DEBUG, "set_rule rule uid greater than table size!\n");
		return -EINVAL;
	}
	*/

	tcache = table_cache[rule->table_id];

	/* Deleting a rule only needs the key that was cached at set time,
	 * so we only allocate and fill 'key' and 'value' on set_rule.
	 */
	if (set_rule) {
		key = calloc(1, table_aux[rule->table_id].size_key);
		if (!key)
			return -ENOMEM;

		value = calloc(1, table_aux[rule->table_id].size_value);
		if (!value) {
			free(key);
			return -ENOMEM;
		}

		err = bpf_match_to_key(rule->table_id, rule->matches, key);
		if (!err)
			err = bpf_match_to_value(rule->actions, value);
		if (err) {
			free(key);
			free(value);
			return err;
		}

		bpf_map_update_elem((__u32)table_fds[rule->table_id],
				    (__u8 *) key,
				    value);

		/* cache the key so the rule can be deleted by uid later */
		memcpy(&tcache[rule->uid], key,
		       table_aux[rule->table_id].size_key);
	} else {
		/* replay the key cached at set time */
		value = NULL;
		key = (__u8 *)&tcache[rule->uid];

		bpf_map_delete_elem((__u32)table_fds[rule->table_id],
				    (__u8 *) key);
	}

	printf("%s: using key: ", __func__);
	for (i = 0; i < table_aux[rule->table_id].size_key; i++)
		printf("%02x\n", key[i]);
	printf("\n");

	if (!set_rule)
		memset(&tcache[rule->uid], 0, table_aux[rule->table_id].size_key);

	return 0;
}