Example #1
static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	/* A zero-initialised bpf_spin_lock must decode as an unlocked arch spinlock. */
	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	/* Both lock representations must occupy the same 4 bytes the cast above assumes. */
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}
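For reference, compiletime_assert() here is the Linux kernel macro that turns a condition the compiler can prove at build time into a build error carrying the given message. A minimal, self-contained sketch of the same idea is shown below; it is built on C11 _Static_assert rather than the kernel's actual implementation (an assumption made so the snippet compiles standalone), and my_compiletime_assert and fake_spin_lock are illustrative names, not kernel identifiers.

/* Sketch only: not the kernel macro. C11 _Static_assert handles integer
 * constant expressions, which is enough to illustrate the size checks above. */
#include <stdint.h>

#define my_compiletime_assert(cond, msg) _Static_assert(cond, msg)

struct fake_spin_lock {
	uint32_t val;
};

int main(void)
{
	/* The build fails, quoting the message, if this layout assumption breaks. */
	my_compiletime_assert(sizeof(struct fake_spin_lock) == sizeof(uint32_t),
			      "fake_spin_lock must stay 4 bytes");
	return 0;
}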
Example #2
struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
{
	struct all_io_list *rep;
	struct thread_data *td;
	size_t depth;
	void *next;
	int i, nr;

	/* The reply header must stay 8 bytes to keep the serialized format stable. */
	compiletime_assert(sizeof(struct all_io_list) == 8, "all_io_list");

	/*
	 * Calculate reply space needed. We need one 'io_state' per thread,
	 * and the size will vary depending on depth.
	 */
	depth = 0;
	nr = 0;
	for_each_td(td, i) {
		if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
			continue;
		td->stop_io = 1;
		td->flags |= TD_F_VSTATE_SAVED;
		depth += td->o.iodepth;
		nr++;
	}

	if (!nr)
		return NULL;

	*sz = sizeof(*rep);
	*sz += nr * sizeof(struct thread_io_list);
	*sz += depth * sizeof(uint64_t);
	rep = malloc(*sz);
	if (!rep)
		return NULL;

	rep->threads = cpu_to_le64((uint64_t) nr);

	/* Fill one thread_io_list record per selected thread, back to back. */
	next = &rep->state[0];
	for_each_td(td, i) {
		struct thread_io_list *s = next;
		unsigned int comps;

		if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
			continue;

		if (td->last_write_comp) {
			int j, k;

			if (td->io_blocks[DDIR_WRITE] < td->o.iodepth)
				comps = td->io_blocks[DDIR_WRITE];
			else
				comps = td->o.iodepth;

			/* Walk the completion ring backwards, newest entry first. */
			k = td->last_write_idx - 1;
			for (j = 0; j < comps; j++) {
				if (k == -1)
					k = td->o.iodepth - 1;
				s->offsets[j] = cpu_to_le64(td->last_write_comp[k]);
				k--;
			}
		} else
			comps = 0;

		s->no_comps = cpu_to_le64((uint64_t) comps);
		s->depth = cpu_to_le64((uint64_t) td->o.iodepth);
		s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
		s->index = cpu_to_le64((uint64_t) i);
		/* Serialise the random generator state, 64-bit or 32-bit variant. */
		if (td->random_state.use64) {
			s->rand.state64.s[0] = cpu_to_le64(td->random_state.state64.s1);
			s->rand.state64.s[1] = cpu_to_le64(td->random_state.state64.s2);
			s->rand.state64.s[2] = cpu_to_le64(td->random_state.state64.s3);
			s->rand.state64.s[3] = cpu_to_le64(td->random_state.state64.s4);
			s->rand.state64.s[4] = cpu_to_le64(td->random_state.state64.s5);
			s->rand.state64.s[5] = 0;
			s->rand.use64 = cpu_to_le64((uint64_t)1);
		} else {
			s->rand.state32.s[0] = cpu_to_le32(td->random_state.state32.s1);
			s->rand.state32.s[1] = cpu_to_le32(td->random_state.state32.s2);
			s->rand.state32.s[2] = cpu_to_le32(td->random_state.state32.s3);
			s->rand.state32.s[3] = 0;
			s->rand.use64 = 0;
		}
		s->name[sizeof(s->name) - 1] = '\0';
		strncpy((char *) s->name, td->o.name, sizeof(s->name) - 1);
		next = io_list_next(s);
	}

	return rep;
}
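The sizing pattern above is the interesting part: one fixed header, then one variable-length record per selected thread, each carrying up to iodepth 64-bit offsets, so the buffer size has to be computed in a first pass before a second pass fills it. A minimal sketch of that two-pass size-then-fill approach follows; the type and function names (wire_header, wire_thread, pack_reply) are illustrative rather than fio's, and glibc's htole64() from <endian.h> stands in for cpu_to_le64().

#define _DEFAULT_SOURCE		/* for htole64() on glibc */
#include <endian.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative wire layout: a header followed by back-to-back thread records. */
struct wire_thread {
	uint64_t no_comps;
	uint64_t offsets[];		/* no_comps little-endian offsets */
};

struct wire_header {
	uint64_t threads;
	/* thread records follow immediately after the header */
};

void *pack_reply(const unsigned int *comps, int nr, size_t *sz)
{
	size_t total = sizeof(struct wire_header);
	int i;

	/* Pass 1: add up the space every variable-length record needs. */
	for (i = 0; i < nr; i++)
		total += sizeof(struct wire_thread) + comps[i] * sizeof(uint64_t);

	/* Pass 2 would walk the same threads again and fill the buffer,
	 * advancing a cursor by each record's size, as get_all_io_list() does. */
	struct wire_header *hdr = calloc(1, total);
	if (!hdr)
		return NULL;

	hdr->threads = htole64((uint64_t) nr);
	*sz = total;
	return hdr;
}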