Example #1: test_ds_buffers(), unit tests for data-stack buffer growth, interruption and reallocs
static void test_ds_buffers(void)
{
	test_begin("data-stack buffer growth");
	T_BEGIN {
		size_t i;
		unsigned char *p;
		size_t left = t_get_bytes_available();
		while (left < 10000) {
			t_malloc_no0(left); /* force a new block */
			left = t_get_bytes_available();
		}
		left -= 64; /* make room for the sentry if DEBUG */
		p = t_buffer_get(1);
		p[0] = 1;
		for (i = 2; i <= left; i++) {
			/* grow it */
			unsigned char *p2 = t_buffer_get(i);
			test_assert_idx(p == p2, i);
			p[i-1] = i;
			test_assert_idx(p[i-2] == (unsigned char)(i-1), i);
		}
		/* now fix it permanently */
		t_buffer_alloc_last_full();
		test_assert(t_get_bytes_available() < 64 + MEM_ALIGN(1));
	} T_END;
	test_end();

	test_begin("data-stack buffer interruption");
	T_BEGIN {
		void *b = t_buffer_get(1000);
		void *a = t_malloc_no0(1);
		void *b2 = t_buffer_get(1001);
		test_assert(a == b); /* expected, not guaranteed */
		test_assert(b2 != b);
	} T_END;
	test_end();

	test_begin("data-stack buffer with reallocs");
	T_BEGIN {
		size_t bigleft = t_get_bytes_available();
		size_t i;
		for (i = 1; i < bigleft-64; i += i_rand()%32) T_BEGIN {
			unsigned char *p, *p2;
			size_t left;
			t_malloc_no0(i);
			left = t_get_bytes_available();
			/* The most useful idx for the assert is 'left' */
			test_assert_idx(left <= bigleft-i, left);
			p = t_buffer_get(left/2);
			p[0] = 'Z'; p[left/2 - 1] = 'Z';
			p2 = t_buffer_get(left + left/2);
			test_assert_idx(p != p2, left);
			test_assert_idx(p[0] == 'Z', left);
			test_assert_idx(p[left/2 -1] == 'Z', left);
		} T_END;
	} T_END;
	test_end();
}
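
The tests above pin down the t_buffer_get() contract: the scratch buffer it returns can keep growing while nothing else allocates, an intervening t_malloc*() call invalidates it (the interruption test shows the malloc typically reuses the very same bytes), and the contents only become permanent once t_buffer_alloc() or t_buffer_alloc_last_full() commits them. A minimal sketch of that grow-then-commit idiom (sizes and byte values are illustrative, not taken from the test):

T_BEGIN {
	/* reserve scratch space; nothing is committed to the frame yet */
	unsigned char *buf = t_buffer_get(16);
	size_t used = 0;

	buf[used++] = 0x01;

	/* ask for more room: with no t_malloc*() call in between, the test
	   above shows the existing contents stay readable after the grow */
	buf = t_buffer_get(32);
	buf[used++] = 0x02;

	/* commit exactly the bytes that were used; later allocations can
	   no longer reuse or move this memory */
	t_buffer_alloc(used);
} T_END;
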
Example #2: password_generate_salt(), which builds a random salt string in a t_malloc_no0() buffer
const char *password_generate_salt(size_t len)
{
	unsigned int i;
	char *salt;

	salt = t_malloc_no0(len + 1);
	random_fill(salt, len);
	for (i = 0; i < len; i++)
		salt[i] = salt_chars[salt[i] % (sizeof(salt_chars)-1)];
	salt[len] = '\0';
	return salt;
}
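
The salt is allocated with t_malloc_no0(), so it lives only until the enclosing data-stack frame is popped; a caller has to consume it, or copy it somewhere more permanent, before its T_BEGIN/T_END block ends. A hypothetical caller sketch (the length of 16 and the copy-out step are assumptions, not taken from the function above):

char salt_copy[17];

T_BEGIN {
	const char *salt = password_generate_salt(16);

	/* use the salt while the frame is alive, or copy it out
	   (16 characters plus the trailing '\0') */
	memcpy(salt_copy, salt, 16 + 1);
} T_END;
/* `salt` itself must not be used here: T_END released its data-stack frame */
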
Example #3: test_ds_recurse(), recursive data-stack frames exercising t_try_realloc() with canary checks
static void test_ds_recurse(int depth, int number, size_t size)
{
	int i;
	char **ps;
	char tag[2] = { depth+1, '\0' };
	int try_fails = 0;
	data_stack_frame_t t_id = t_push_named("test_ds_recurse[%i]", depth);
	ps = t_buffer_get(sizeof(char *) * number);
	i_assert(ps != NULL);
	t_buffer_alloc(sizeof(char *) * number);

	for (i = 0; i < number; i++) {
		ps[i] = t_malloc_no0(size/2);
		bool re = t_try_realloc(ps[i], size);
		i_assert(ps[i] != NULL);
		if (!re) {
			try_fails++;
			ps[i] = t_malloc_no0(size);
		}
		/* drop our own canaries */
		memset(ps[i], tag[0], size);
		ps[i][size-2] = 0;
	}
	/* Do not expect a high failure rate from t_try_realloc */
	test_assert_idx(try_fails <= number / 20, depth);

	/* Now recurse... */
	if (depth > 0)
		test_ds_recurse(depth-1, number, size);

	/* Test our canaries are still intact */
	for (i = 0; i < number; i++) {
		test_assert_idx(strspn(ps[i], tag) == size - 2, i);
		test_assert_idx(ps[i][size-1] == tag[0], i);
	}
	test_assert_idx(t_pop(&t_id), depth);
}
Example #4: acl_shared_namespace_add(), which clones a var_expand table into a t_malloc_no0() buffer to expand a shared-namespace prefix
static void
acl_shared_namespace_add(struct mail_namespace *ns,
			 struct mail_storage *storage, const char *userdomain)
{
	static struct var_expand_table static_tab[] = {
		{ 'u', NULL, "user" },
		{ 'n', NULL, "username" },
		{ 'd', NULL, "domain" },
		{ '\0', NULL, NULL }
	};
	struct shared_storage *sstorage = (struct shared_storage *)storage;
	struct mail_namespace *new_ns = ns;
	struct var_expand_table *tab;
	struct mailbox_list_iterate_context *iter;
	const struct mailbox_info *info;
	const char *p, *mailbox;
	string_t *str;

	if (strcmp(ns->user->username, userdomain) == 0) {
		/* skip ourself */
		return;
	}

	p = strchr(userdomain, '@');

	tab = t_malloc_no0(sizeof(static_tab));
	memcpy(tab, static_tab, sizeof(static_tab));
	tab[0].value = userdomain;
	tab[1].value = p == NULL ? userdomain : t_strdup_until(userdomain, p);
	tab[2].value = p == NULL ? "" : p + 1;

	str = t_str_new(128);
	var_expand(str, sstorage->ns_prefix_pattern, tab);
	mailbox = str_c(str);
	if (shared_storage_get_namespace(&new_ns, &mailbox) < 0)
		return;

	/* check if there are any mailboxes really visible to us */
	iter = mailbox_list_iter_init(new_ns->list, "*",
				      MAILBOX_LIST_ITER_RETURN_NO_FLAGS);
	while ((info = mailbox_list_iter_next(iter)) != NULL)
		break;
	(void)mailbox_list_iter_deinit(&iter);

	if (info == NULL && !acl_ns_prefix_exists(new_ns)) {
		/* no visible mailboxes, remove the namespace */
		mail_namespace_destroy(new_ns);
	}
}
Example #5: test_ds_realloc(), which grows a t_malloc_no0() allocation in place with t_try_realloc()
static void test_ds_realloc(void)
{
	test_begin("data-stack realloc");
	T_BEGIN {
		size_t i;
		unsigned char *p;
		size_t left = t_get_bytes_available();
		while (left < 10000) {
			t_malloc_no0(left); /* force a new block */
			left = t_get_bytes_available();
		}
		left -= 64; /* make room for the sentry if DEBUG */
		p = t_malloc_no0(1);
		p[0] = 1;
		for (i = 2; i <= left; i++) {
			/* grow it */
			test_assert_idx(t_try_realloc(p, i), i);
			p[i-1] = i;
			test_assert_idx(p[i-2] == (unsigned char)(i-1), i);
		}
		test_assert(t_get_bytes_available() < 64 + MEM_ALIGN(1));
	} T_END;
	test_end();
}
Example #6: dec2str(), which formats an unsigned integer backwards into a t_malloc_no0() buffer
const char *dec2str(uintmax_t number)
{
	char *buffer;
	int pos;

	pos = MAX_INT_STRLEN;
	buffer = t_malloc_no0(pos);

	buffer[--pos] = '\0';
	do {
		buffer[--pos] = (number % 10) + '0';
		number /= 10;
	} while (number != 0 && pos >= 0);

	i_assert(pos >= 0);
	return buffer + pos;
}
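
dec2str() writes the digits back to front, so the pointer it returns lands in the tail of the MAX_INT_STRLEN-byte allocation and the bytes in front of it stay uninitialized but unread. A short usage sketch (the literal 407 is arbitrary):

T_BEGIN {
	const char *s = dec2str(407);

	/* s is buffer + pos, a pointer into the tail of the
	   MAX_INT_STRLEN-byte allocation; it stays valid only
	   until this data-stack frame ends */
	i_assert(strcmp(s, "407") == 0);
} T_END;
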
Example #7: t_str_replace(), which replaces every occurrence of one character in a string, copying it onto the data stack when needed
const char *t_str_replace(const char *str, char from, char to)
{
	char *out;
	unsigned int i, len;

	if (strchr(str, from) == NULL)
		return str;

	len = strlen(str);
	out = t_malloc_no0(len + 1);
	for (i = 0; i < len; i++) {
		if (str[i] == from)
			out[i] = to;
		else
			out[i] = str[i];
	}
	out[i] = '\0';
	return out;
}
Example #8: uidlist_write_array(), which packs a UID list using the data stack for small buffers and i_malloc() for large ones
static int
uidlist_write_array(struct ostream *output, const uint32_t *uid_list,
		    unsigned int uid_count, uint32_t packed_flags,
		    uint32_t offset, bool write_size, uint32_t *size_r)
{
	uint8_t *uidbuf, *bufp, sizebuf[SQUAT_PACK_MAX_SIZE], *sizebufp;
	uint8_t listbuf[SQUAT_PACK_MAX_SIZE], *listbufp = listbuf;
	uint32_t uid, uid2, prev, base_uid, size_value;
	unsigned int i, bitmask_len, uid_list_len;
	unsigned int idx, max_idx, mask;
	bool datastack;
	int num;

	if ((packed_flags & UIDLIST_PACKED_FLAG_BEGINS_WITH_POINTER) != 0)
		squat_pack_num(&listbufp, offset);

	/* @UNSAFE */
	base_uid = uid_list[0] & ~UID_LIST_MASK_RANGE;
	datastack = uid_count < 1024*8/SQUAT_PACK_MAX_SIZE;
	if (datastack)
		uidbuf = t_malloc_no0(SQUAT_PACK_MAX_SIZE * uid_count);
	else
		uidbuf = i_malloc(SQUAT_PACK_MAX_SIZE * uid_count);
	bufp = uidbuf;
	squat_pack_num(&bufp, base_uid);

	bitmask_len = (uid_list[uid_count-1] - base_uid + 7) / 8 +
		(bufp - uidbuf);
	if (bitmask_len < uid_count) {
	bitmask_build:
		i_assert(bitmask_len < SQUAT_PACK_MAX_SIZE*uid_count);

		memset(bufp, 0, bitmask_len - (bufp - uidbuf));
		if ((uid_list[0] & UID_LIST_MASK_RANGE) == 0) {
			i = 1;
			uid = i == uid_count ? 0 : uid_list[i];
		} else {
			i = 0;
			uid = uid_list[0] + 1;
		}
		base_uid++;

		for (; i < uid_count; i++) {
			i_assert((uid & ~UID_LIST_MASK_RANGE) >= base_uid);
			if ((uid & UID_LIST_MASK_RANGE) == 0) {
				uid -= base_uid;
				uid2 = uid;
			} else {
				uid &= ~UID_LIST_MASK_RANGE;
				uid -= base_uid;
				uid2 = uid_list[i+1] - base_uid;
				i++;
			}

			if (uid2 - uid < 3*8) {
				for (; uid <= uid2; uid++)
					bufp[uid / 8] |= 1 << (uid % 8);
			} else {
				/* first byte */
				idx = uid / 8;
				num = uid % 8;
				if (num != 0) {
					uid += 8 - num;
					for (mask = 0; num < 8; num++)
						mask |= 1 << num;
					bufp[idx++] |= mask;
				}

				/* middle bytes */
				num = uid2 % 8;
				max_idx = idx + (uid2 - num - uid)/8;
				for (; idx < max_idx; idx++, uid += 8)
					bufp[idx] = 0xff;

				/* last byte */
				for (mask = 0; num >= 0; num--)
					mask |= 1 << num;
				bufp[idx] |= mask;
			}
			uid = i+1 == uid_count ? 0 : uid_list[i+1];
		}
		uid_list_len = bitmask_len;
		packed_flags |= UIDLIST_PACKED_FLAG_BITMASK;
	} else {
		bufp = uidbuf;
		prev = 0;
		for (i = 0; i < uid_count; i++) {
			uid = uid_list[i];
			if (unlikely((uid & ~UID_LIST_MASK_RANGE) < prev)) {
				if (!datastack)
					i_free(uidbuf);
				return -1;
			}
			if ((uid & UID_LIST_MASK_RANGE) == 0) {
				squat_pack_num(&bufp, (uid - prev) << 1);
				prev = uid + 1;
			} else {
				uid &= ~UID_LIST_MASK_RANGE;
				squat_pack_num(&bufp, 1 | (uid - prev) << 1);
				squat_pack_num(&bufp, uid_list[i+1] - uid - 1);
				prev = uid_list[i+1] + 1;
				i++;
			}
		}
		uid_list_len = bufp - uidbuf;
		if (uid_list_len > bitmask_len) {
			bufp = uidbuf;
			squat_pack_num(&bufp, base_uid);
			goto bitmask_build;
		}
	}

	size_value = ((uid_list_len +
		       (listbufp - listbuf)) << 2) | packed_flags;
	if (write_size) {
		sizebufp = sizebuf;
		squat_pack_num(&sizebufp, size_value);
		o_stream_nsend(output, sizebuf, sizebufp - sizebuf);
	}
	o_stream_nsend(output, listbuf, listbufp - listbuf);
	o_stream_nsend(output, uidbuf, uid_list_len);
	if (!datastack)
		i_free(uidbuf);

	*size_r = size_value;
	return 0;
}
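
uidlist_write_array() picks its allocator by size: buffers below 8 KiB come from the data stack and are released automatically when the frame ends, while larger ones come from i_malloc() and have to be i_free()d on every exit path, including the early return. A minimal sketch of the same idiom (write_blob() and its zero-fill body are illustrative, not part of the sources above; it assumes the same Dovecot lib/ostream headers as the example):

static void write_blob(struct ostream *output, size_t size)
{
	/* same threshold as uidlist_write_array(): small buffers live on
	   the data stack, large ones on the heap */
	bool datastack = size < 1024*8;
	uint8_t *buf = datastack ? t_malloc_no0(size) : i_malloc(size);

	memset(buf, 0, size);		/* stand-in for real content */
	o_stream_nsend(output, buf, size);

	if (!datastack) {
		/* the heap copy needs an explicit free; the data-stack copy
		   goes away with its frame */
		i_free(buf);
	}
}
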
Example #9: fatal_data_stack(), fatal-condition tests that deliberately corrupt data-stack canaries
enum fatal_test_state fatal_data_stack(unsigned int stage)
{
#ifdef DEBUG
#define NONEXISTENT_STACK_FRAME_ID (data_stack_frame_t)999999999
	/* If we abort, we'll be left with a dangling t_push(), so keep a
	   record of our temporary stack id so that we can clean up. */
	static data_stack_frame_t t_id = NONEXISTENT_STACK_FRAME_ID;
	static unsigned char *undo_ptr = NULL;
	static unsigned char undo_data;
	static bool things_are_messed_up = FALSE;
	if (stage != 0) {
		/* Presume that we need to clean up from the prior test:
		   undo the evil write, then we will be able to t_pop cleanly,
		   and finally we can end the test stanza. */
		if (things_are_messed_up || undo_ptr == NULL)
			return FATAL_TEST_ABORT; /* abort, things are messed up with t_pop */
		*undo_ptr = undo_data;
		undo_ptr = NULL;
		/* t_pop mustn't abort, that would cause recursion */
		things_are_messed_up = TRUE;
		if (t_id != NONEXISTENT_STACK_FRAME_ID && !t_pop(&t_id))
			return FATAL_TEST_ABORT; /* abort, things are messed up with us */
		things_are_messed_up = FALSE;
		t_id = NONEXISTENT_STACK_FRAME_ID;
		test_end();
	}

	switch(stage) {
	case 0: {
		unsigned char *p;
		test_begin("fatal data-stack underrun");
		t_id = t_push_named("fatal_data_stack underrun");
		size_t left = t_get_bytes_available();
		p = t_malloc_no0(left-80); /* will fit */
		p = t_malloc_no0(100); /* won't fit, will get new block */
		int seek = 0;
		/* Seek back for the canary, don't assume endianness */
		while(seek > -60 &&
		      ((p[seek+1] != 0xDB) ||
		       ((p[seek]   != 0xBA || p[seek+2] != 0xAD) &&
			(p[seek+2] != 0xBA || p[seek]   != 0xAD))))
			seek--;
		if (seek <= -60)
			return FATAL_TEST_ABORT; /* abort, couldn't find header */
		undo_ptr = p + seek;
		undo_data = *undo_ptr;
		*undo_ptr = '*';
		/* t_malloc_no0 will panic block header corruption */
		test_expect_fatal_string("Corrupted data stack canary");
		(void)t_malloc_no0(10);
		return FATAL_TEST_FAILURE;
	}

	case 1: case 2: {
		test_begin(stage == 1 ? "fatal t_malloc_no0 overrun near" : "fatal t_malloc_no0 overrun far");
		t_id = t_push_named(stage == 1 ? "fatal t_malloc_no0 overrun near" : "fatal t_malloc_no0 overrun far");
		unsigned char *p = t_malloc_no0(10);
		undo_ptr = p + 10 + (stage == 1 ? 0 : 8*4-1); /* presumes sentry size */
		undo_data = *undo_ptr;
		*undo_ptr = '*';
		/* t_pop will now fail */
		test_expect_fatal_string("buffer overflow");
		(void)t_pop(&t_id);
		t_id = NONEXISTENT_STACK_FRAME_ID; /* We're FUBAR, mustn't pop next entry */
		return FATAL_TEST_FAILURE;
	}

	case 3: case 4: {
		test_begin(stage == 3 ? "fatal t_buffer_get overrun near" : "fatal t_buffer_get overrun far");
		t_id = t_push_named(stage == 3 ? "fatal t_buffer_get overrun near" : "fatal t_buffer_get overrun far");
		unsigned char *p = t_buffer_get(10);
		undo_ptr = p + 10 + (stage == 3 ? 0 : 8*4-1);
		undo_data = *undo_ptr;
		*undo_ptr = '*';
		/* t_pop will now fail */
		test_expect_fatal_string("buffer overflow");
		(void)t_pop(&t_id);
		t_id = NONEXISTENT_STACK_FRAME_ID; /* We're FUBAR, mustn't pop next entry */
		return FATAL_TEST_FAILURE;
	}

	default:
		things_are_messed_up = TRUE;
		return FATAL_TEST_FINISHED;
	}
#else
	return stage == 0 ? FATAL_TEST_FINISHED : FATAL_TEST_ABORT;
#endif
}