Example #1
File: tlb.c Project: jvesely/helenos
/** Invalidate single entry in TLB
 *
 * @param page Virtual address of the page
 */
static inline void invalidate_page(uintptr_t page)
{
#if defined(PROCESSOR_ARCH_armv6) || defined(PROCESSOR_ARCH_armv7_a)
	if (TLBTR_read() & TLBTR_SEP_FLAG) {
		ITLBIMVA_write(page);
		DTLBIMVA_write(page);
	} else {
		TLBIMVA_write(page);
	}
#elif defined(PROCESSOR_arm920t)
	ITLBIMVA_write(page);
	DTLBIMVA_write(page);
#elif defined(PROCESSOR_arm926ej_s)
	TLBIMVA_write(page);
#else
#error Unknown TLB type
#endif

	/*
	 * "A TLB maintenance operation is only guaranteed to be complete after
	 * the execution of a DSB instruction."
	 * "An ISB instruction, or a return from an exception, causes the
	 * effect of all completed TLB maintenance operations that appear in
	 * program order before the ISB or return from exception to be visible
	 * to all subsequent instructions, including the instruction fetches
	 * for those instructions."
	 * ARM Architecture Reference Manual, ch. B3.10.1, pp. B3-1374 to B3-1375
	 */
	read_barrier();
	inst_barrier();
}
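The quoted passage pairs a DSB (which completes the maintenance operation) with an ISB (which makes it visible to instruction fetch). As a hedged illustration only — the real HelenOS barrier macros are per-architecture and are not shown in this example — the two calls above could plausibly expand to ARMv7 inline assembly such as:

/* Hypothetical ARMv7 expansions of the barriers used above. */
static inline void read_barrier(void)
{
	/* DSB: waits until all prior TLB maintenance operations complete. */
	asm volatile ("dsb" ::: "memory");
}

static inline void inst_barrier(void)
{
	/* ISB: makes completed maintenance visible to instruction fetch. */
	asm volatile ("isb" ::: "memory");
}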
Example #2
File: syslet-rw.c Project: cvoltz/fio
static void fio_syslet_add_events(struct thread_data *td, unsigned int nr)
{
	struct syslet_data *sd = td->io_ops->data;
	unsigned int i, uidx;

	uidx = sd->ring->user_tail;
	read_barrier();

	for (i = 0; i < nr; i++) {
		unsigned int idx = (i + uidx) & sd->ring_mask;
		struct syslet_completion *comp = &sd->ring->comp[idx];
		struct io_u *io_u = (struct io_u *) (long) comp->caller_data;
		long ret;

		ret = comp->status;
		if (ret <= 0) {
			io_u->resid = io_u->xfer_buflen;
			io_u->error = -ret;
		} else {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
		}

		fio_syslet_add_event(td, io_u);
	}
}
Example #3
File: io_uring.c Project: arh/fio
static int prep_more_ios(struct submitter *s, int max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		read_barrier();
		if (next_tail == *ring->head)
			break;

		index = tail & sq_ring_mask;
		init_io(s, index);
		ring->array[index] = index;
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (*ring->tail != tail) {
		/* order tail store with writes to sqes above */
		write_barrier();
		*ring->tail = tail;
		write_barrier();
	}
	return prepped;
}
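Examples #2, #3, #7, #8 and #9 all follow the same single-producer/single-consumer ring discipline: the producer fills entries, issues a write barrier, then publishes the new tail; the consumer loads the tail behind a read barrier before touching the entries, and publishes the advanced head once it is done with them. A distilled sketch of that pairing, using hypothetical names (not the fio/io_uring API) and the same read_barrier()/write_barrier() macros as the examples:

#define RING_SIZE 64	/* must be a power of two */

struct spsc_ring {
	unsigned head;			/* advanced by the consumer */
	unsigned tail;			/* advanced by the producer */
	void *entries[RING_SIZE];
};

static int produce(struct spsc_ring *r, void *item)
{
	if (r->tail - r->head == RING_SIZE)
		return -1;		/* full; a stale head is merely conservative */
	r->entries[r->tail & (RING_SIZE - 1)] = item;
	write_barrier();		/* order the entry store before the tail publish */
	r->tail++;
	return 0;
}

static void *consume(struct spsc_ring *r)
{
	read_barrier();			/* observe tail and the entries behind it */
	if (r->head == r->tail)
		return NULL;		/* empty */
	void *item = r->entries[r->head & (RING_SIZE - 1)];
	r->head++;
	write_barrier();		/* publish head, as in Examples #7 and #8 */
	return item;
}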
Example #4
/** Get time of day
 *
 * The time variables are memory mapped (read-only) from kernel which
 * updates them periodically.
 *
 * As it is impossible to read two values atomically, we use a trick:
 * first we read the seconds, then the microseconds, then the seconds
 * again. If a second elapsed in the meantime, we set the microseconds
 * to zero.
 *
 * This ensures that the values returned by two subsequent calls
 * to gettimeofday() are monotonic.
 *
 */
int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (ktime == NULL) {
		uintptr_t faddr;
		int rc = sysinfo_get_value("clock.faddr", &faddr);
		if (rc != EOK) {
			errno = rc;
			return -1;
		}
		
		void *addr;
		rc = physmem_map((void *) faddr, 1,
		    AS_AREA_READ | AS_AREA_CACHEABLE, &addr);
		if (rc != EOK) {
			as_area_destroy(addr);
			errno = rc;
			return -1;
		}
		
		ktime = addr;
	}
	
	if (tz) {
		tz->tz_minuteswest = 0;
		tz->tz_dsttime = DST_NONE;
	}
	
	sysarg_t s2 = ktime->seconds2;
	
	read_barrier();
	tv->tv_usec = ktime->useconds;
	
	read_barrier();
	sysarg_t s1 = ktime->seconds1;
	
	if (s1 != s2) {
		tv->tv_sec = max(s1, s2);
		tv->tv_usec = 0;
	} else
		tv->tv_sec = s1;
	
	return 0;
}
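For the trick to work, the kernel-side writer must update the three mapped fields in the opposite order from the reader, with write barriers pairing the read barriers above; otherwise the s1 == s2 check could pass while useconds is stale. A sketch of the ordering the reader appears to rely on — inferred from the reader, not taken from the actual HelenOS kernel:

/* Hypothetical kernel writer, run at each second rollover. */
static void clock_second_tick(void)
{
	ktime->seconds1++;	/* open the window: seconds1 != seconds2 */
	write_barrier();
	ktime->useconds = 0;	/* start the new second */
	write_barrier();
	ktime->seconds2++;	/* close the window: fields consistent again */
}

If the reader sees seconds1 == seconds2, both loads landed entirely before or entirely after this sequence, so the useconds value read in between belongs to that same second.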
Example #5
File: io_uring.c Project: arh/fio
static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * The kernel side does the submission; we just need to check if
	 * the ring is flagged as needing a kick and, if so, call
	 * io_uring_enter(). This only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;

		read_barrier();
		if (*ring->flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}
Example #6
File: tlb.c Project: jvesely/helenos
/** Invalidate all entries in TLB.
 *
 * @note See ARM Architecture Reference Manual, section 3.7.7 for details.
 */
void tlb_invalidate_all(void)
{
	TLBIALL_write(0);
	/*
	 * "A TLB maintenance operation is only guaranteed to be complete after
	 * the execution of a DSB instruction."
	 * "An ISB instruction, or a return from an exception, causes the
	 * effect of all completed TLB maintenance operations that appear in
	 * program order before the ISB or return from exception to be visible
	 * to all subsequent instructions, including the instruction fetches
	 * for those instructions."
	 * ARM Architecture Reference Manual, ch. B3.10.1, pp. B3-1374 to B3-1375
	 */
	read_barrier();
	inst_barrier();
}
Example #7
File: io_uring.c Project: arh/fio
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				   unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		read_barrier();
		if (head == *ring->tail)
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	*ring->head = head;
	write_barrier();
	return reaped;
}
Example #8
File: io_uring.c Project: arh/fio
static int reap_events(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		struct file *f;

		read_barrier();
		if (head == *ring->tail)
			break;
		cqe = &ring->cqes[head & cq_ring_mask];
		if (!do_nop) {
			f = (struct file *) (uintptr_t) cqe->user_data;
			f->pending_ios--;
			if (cqe->res != BS) {
				printf("io: unexpected ret=%d\n", cqe->res);
				if (polled && cqe->res == -EOPNOTSUPP)
					printf("Your filesystem doesn't support poll\n");
				return -1;
			}
		}
		if (cqe->flags & IOCQE_FLAG_CACHEHIT)
			s->cachehit++;
		else
			s->cachemiss++;
		reaped++;
		head++;
	} while (1);

	s->inflight -= reaped;
	*ring->head = head;
	write_barrier();
	return reaped;
}
Example #9
File: io_uring.c Project: arh/fio
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	read_barrier();
	if (next_tail == *ring->head)
		return FIO_Q_BUSY;

	/* ensure sqe stores are ordered with tail update */
	write_barrier();
	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	*ring->tail = next_tail;
	write_barrier();

	ld->queued++;
	return FIO_Q_QUEUED;
}
Example #10
oop_t ActivationObj::send(bool isImplicitSelf, oop_t sel, bool isUndirected, oop_t delegatee, fint arg_count, oop_t this_act) {
  DECLARE_STACK;
  sp_limit; // unused
  sp -= arg_count;
  smi args_offset = sp;
  oop_t* argsp = oop_addr(io + args_offset);
  oop_t  rcvr = isImplicitSelf  ?  get_self_quickly(io)  :  pop();
  put_sp_quickly(io, sp);

  // todo optimize time slow? -- dmu 1/06
  for (smi i = 0;  i < arg_count;  ++i )
    read_barrier(io + args_offset + i);
  
  oop_t new_act = NULL;

  StringObj* sel_addr = StringObj::from(sel);
  
  // I use this printf so often that I'd like to leave it here (commented out). -- Adam, 5/06
  // printf("sending "); sel_addr->string_print(); printf("\n");
  
  if (sel_addr->bytes()[0] == '_')  {
 
    oop_t result = primitive_send(sel, rcvr, argsp, arg_count, this_act, &new_act);
    // todo optimize time mightHaveScavengedTheActivation:
    // Use a boolean flag to tell if the activation has moved?
    // Look for other places tagged mightHaveScavengedTheActivation. -- Adam, 5/06
    if (!is_mark(result)) {
      ActivationObj* possibly_moved_act_addr = ActivationObj::from(this_act);
      possibly_moved_act_addr->remote_push(result);
    }
  }
  else {
    non_primitive_send(rcvr, argsp, arg_count, isImplicitSelf, sel, isUndirected, delegatee, this_act, &new_act);
  }
  return new_act ? new_act : this_act;
}
Example #11
File: verify.c Project: apexearth/fio
static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha512 *vh = hdr_priv(hdr);
	struct fio_sha512_ctx sha512_ctx = {
		.buf = vh->sha512,
	};

	fio_sha512_init(&sha512_ctx);
	fio_sha512_update(&sha512_ctx, p, len);
}

static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha256 *vh = hdr_priv(hdr);
	struct fio_sha256_ctx sha256_ctx = {
		.buf = vh->sha256,
	};

	fio_sha256_init(&sha256_ctx);
	fio_sha256_update(&sha256_ctx, p, len);
	fio_sha256_final(&sha256_ctx);
}

static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha1 *vh = hdr_priv(hdr);
	struct fio_sha1_ctx sha1_ctx = {
		.H = vh->sha1,
	};

	fio_sha1_init(&sha1_ctx);
	fio_sha1_update(&sha1_ctx, p, len);
	fio_sha1_final(&sha1_ctx);
}

static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc7 *vh = hdr_priv(hdr);

	vh->crc7 = fio_crc7(p, len);
}

static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc16 *vh = hdr_priv(hdr);

	vh->crc16 = fio_crc16(p, len);
}

static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32(p, len);
}

static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32c(p, len);
}

static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc64 *vh = hdr_priv(hdr);

	vh->crc64 = fio_crc64(p, len);
}

static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_md5 *vh = hdr_priv(hdr);
	struct fio_md5_ctx md5_ctx = {
		.hash = (uint32_t *) vh->md5_digest,
	};

	fio_md5_init(&md5_ctx);
	fio_md5_update(&md5_ctx, p, len);
	fio_md5_final(&md5_ctx);
}

static void __fill_hdr(struct verify_header *hdr, int verify_type,
		       uint32_t len, uint64_t rand_seed)
{
	void *p = hdr;

	hdr->magic = FIO_HDR_MAGIC;
	hdr->verify_type = verify_type;
	hdr->len = len;
	hdr->rand_seed = rand_seed;
	hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
}
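__fill_hdr() checksums exactly the header bytes that precede the crc32 member by passing offsetof(struct verify_header, crc32) as the length, so the checksum never covers itself. The same idiom in a self-contained form (hypothetical struct and callback; offsetof comes from <stddef.h>):

#include <stddef.h>
#include <stdint.h>

struct pkt_hdr {
	uint32_t magic;
	uint32_t len;
	uint64_t seed;
	uint32_t crc;	/* must come after every field the checksum covers */
};

static void pkt_hdr_seal(struct pkt_hdr *h,
			 uint32_t (*crc_fn)(const void *, size_t))
{
	/* Checksum everything up to, but not including, the crc field. */
	h->crc = crc_fn(h, offsetof(struct pkt_hdr, crc));
}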


static void fill_hdr(struct verify_header *hdr, int verify_type, uint32_t len,
		     uint64_t rand_seed)
{
	if (verify_type != VERIFY_PATTERN_NO_HDR)
		__fill_hdr(hdr, verify_type, len, rand_seed);
}

static void populate_hdr(struct thread_data *td, struct io_u *io_u,
			 struct verify_header *hdr, unsigned int header_num,
			 unsigned int header_len)
{
	unsigned int data_len;
	void *data, *p;

	p = (void *) hdr;

	fill_hdr(hdr, td->o.verify, header_len, io_u->rand_seed);

	data_len = header_len - hdr_size(td, hdr);

	data = p + hdr_size(td, hdr);
	switch (td->o.verify) {
	case VERIFY_MD5:
		dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_md5(hdr, data, data_len);
		break;
	case VERIFY_CRC64:
		dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc64(hdr, data, data_len);
		break;
	case VERIFY_CRC32C:
	case VERIFY_CRC32C_INTEL:
		dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32c(hdr, data, data_len);
		break;
	case VERIFY_CRC32:
		dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32(hdr, data, data_len);
		break;
	case VERIFY_CRC16:
		dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc16(hdr, data, data_len);
		break;
	case VERIFY_CRC7:
		dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc7(hdr, data, data_len);
		break;
	case VERIFY_SHA256:
		dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha256(hdr, data, data_len);
		break;
	case VERIFY_SHA512:
		dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha512(hdr, data, data_len);
		break;
	case VERIFY_XXHASH:
		dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
						io_u, hdr->len);
		fill_xxhash(hdr, data, data_len);
		break;
	case VERIFY_META:
		dprint(FD_VERIFY, "fill meta io_u %p, len %u\n",
						io_u, hdr->len);
		fill_meta(hdr, td, io_u, header_num);
		break;
	case VERIFY_SHA1:
		dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha1(hdr, data, data_len);
		break;
	case VERIFY_PATTERN:
	case VERIFY_PATTERN_NO_HDR:
		/* nothing to do here */
		break;
	default:
		log_err("fio: bad verify type: %d\n", td->o.verify);
		assert(0);
	}

	if (td->o.verify_offset && hdr_size(td, hdr))
		memswp(p, p + td->o.verify_offset, hdr_size(td, hdr));
}

/*
 * fill body of io_u->buf with random data and add a header with the
 * checksum of choice
 */
void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->o.verify == VERIFY_NULL)
		return;

	io_u->numberio = td->io_issues[io_u->ddir];

	fill_pattern_headers(td, io_u, 0, 0);
}

int get_next_verify(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = NULL;

	/*
	 * this io_u is from a requeue, we already filled the offsets
	 */
	if (io_u->file)
		return 0;

	if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
		struct rb_node *n = rb_first(&td->io_hist_tree);

		ipo = rb_entry(n, struct io_piece, rb_node);

		/*
		 * Ensure that the associated IO has completed
		 */
		read_barrier();
		if (ipo->flags & IP_F_IN_FLIGHT)
			goto nothing;

		rb_erase(n, &td->io_hist_tree);
		assert(ipo->flags & IP_F_ONRB);
		ipo->flags &= ~IP_F_ONRB;
	} else if (!flist_empty(&td->io_hist_list)) {
		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);

		/*
		 * Ensure that the associated IO has completed
		 */
		read_barrier();
		if (ipo->flags & IP_F_IN_FLIGHT)
			goto nothing;

		flist_del(&ipo->list);
		assert(ipo->flags & IP_F_ONLIST);
		ipo->flags &= ~IP_F_ONLIST;
	}

	if (ipo) {
		td->io_hist_len--;

		io_u->offset = ipo->offset;
		io_u->buflen = ipo->len;
		io_u->numberio = ipo->numberio;
		io_u->file = ipo->file;
		io_u_set(io_u, IO_U_F_VER_LIST);

		if (ipo->flags & IP_F_TRIMMED)
			io_u_set(io_u, IO_U_F_TRIMMED);

		if (!fio_file_open(io_u->file)) {
			int r = td_io_open_file(td, io_u->file);

			if (r) {
				dprint(FD_VERIFY, "failed file %s open\n",
						io_u->file->file_name);
				return 1;
			}
		}

		get_file(ipo->file);
		assert(fio_file_open(io_u->file));
		io_u->ddir = DDIR_READ;
		io_u->xfer_buf = io_u->buf;
		io_u->xfer_buflen = io_u->buflen;

		remove_trim_entry(td, ipo);
		free(ipo);
		dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);

		if (!td->o.verify_pattern_bytes) {
			io_u->rand_seed = __rand(&td->verify_state);
			if (sizeof(int) != sizeof(long *))
				io_u->rand_seed *= __rand(&td->verify_state);
		}
		return 0;
	}

nothing:
	dprint(FD_VERIFY, "get_next_verify: empty\n");
	return 1;
}
Example #12
File: verify.c Project: apexearth/fio
static void *verify_async_thread(void *data)
{
	struct thread_data *td = data;
	struct io_u *io_u;
	int ret = 0;

	if (fio_option_is_set(&td->o, verify_cpumask) &&
	    fio_setaffinity(td->pid, td->o.verify_cpumask)) {
		log_err("fio: failed setting verify thread affinity\n");
		goto done;
	}

	do {
		FLIST_HEAD(list);

		read_barrier();
		if (td->verify_thread_exit)
			break;

		pthread_mutex_lock(&td->io_u_lock);

		while (flist_empty(&td->verify_list) &&
		       !td->verify_thread_exit) {
			ret = pthread_cond_wait(&td->verify_cond,
							&td->io_u_lock);
			if (ret) {
				pthread_mutex_unlock(&td->io_u_lock);
				break;
			}
		}

		flist_splice_init(&td->verify_list, &list);
		pthread_mutex_unlock(&td->io_u_lock);

		if (flist_empty(&list))
			continue;

		while (!flist_empty(&list)) {
			io_u = flist_first_entry(&list, struct io_u, verify_list);
			flist_del_init(&io_u->verify_list);

			io_u_set(io_u, IO_U_F_NO_FILE_PUT);
			ret = verify_io_u(td, &io_u);

			put_io_u(td, io_u);
			if (!ret)
				continue;
			if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
				update_error_count(td, ret);
				td_clear_error(td);
				ret = 0;
			}
		}
	} while (!ret);

	if (ret) {
		td_verror(td, ret, "async_verify");
		if (td->o.verify_fatal)
			fio_mark_td_terminate(td);
	}

done:
	pthread_mutex_lock(&td->io_u_lock);
	td->nr_verify_threads--;
	pthread_mutex_unlock(&td->io_u_lock);

	pthread_cond_signal(&td->free_cond);
	return NULL;
}
Example #13
/***********************************************************************
 * Read a universe from 'filename'.
 *
 * We're reading photon ascii, so the basic algorithm is to read
 * 'Photon Ascii Instances', switch on the type, and read the fields.
 * Unknown instances are ignored.
 *
 * RETURNS:
 *	On Success, a universe object is returned.
 *	On Failure, NULL is returned and errbuf contains an error message.
 *
 */
UNIVERSE *Universe_ReadAscii(const char *filename, char *errbuf)
{
	PHASCII_FILE phf;
	PHASCII_INSTANCE pi;
	char errmsg[1000];
	int success;
	UNIVERSE *u;

	ASSERT( filename != NULL );
	ASSERT( errbuf != NULL );

	phf = Phascii_Open(filename, "r");
	if( phf == NULL ) {
		strcpy(errbuf, Phascii_GetError());
		return NULL;
	}

	u = NULL;

	while( pi = Phascii_GetInstance(phf) ) {

		if( Phascii_IsInstance(pi, "ORGANIC") ) {
			success = read_organic(pi, u, errmsg);

		} else if( Phascii_IsInstance(pi, "BARRIER") ) {
			success = read_barrier(pi, u, errmsg);

		} else if( Phascii_IsInstance(pi, "ER") ) {
			success = read_er(pi, u, errmsg);

		} else if( Phascii_IsInstance(pi, "KFMO") ) {
			success = read_kfmo(pi, u, errmsg);

		} else if( Phascii_IsInstance(pi, "KEYLIST") ) {
			success = read_keylist(pi, u, errmsg);

		} else if( Phascii_IsInstance(pi, "SPORE") ) {
			success = read_spore(pi, u, errmsg);

		} else if( Phascii_IsInstance(pi, "CELL") ) {
			success = read_cell(pi, u, errmsg);

		} else if( Phascii_IsInstance(pi, "ORGANISM") ) {
			success = read_organism(pi, u, errmsg);

		} else if( Phascii_IsInstance(pi, "PLAYER") ) {
			success = read_player(pi, u, errmsg);

		} else if( Phascii_IsInstance(pi, "UNIVERSE") ) {
			success = read_universe(pi, &u, errmsg);

		} else {
			success = 1;
		}

		Phascii_FreeInstance(pi);

		if( ! success ) {
			sprintf(errbuf, "%s", errmsg);
			Phascii_Close(phf);
			return NULL;
		}
	}

	if( ! Phascii_Eof(phf) ) {
		sprintf(errbuf, "%s\n", Phascii_Error(phf));
		Phascii_Close(phf);
		return NULL;
	}

	Phascii_Close(phf);

	if( u == NULL ) {
		sprintf(errbuf, "%s: No UNIVERSE instance", filename);
		return NULL;
	}

	return u;
}
Example #14
File: verify.c Project: Rapaka/fio
static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha512 *vh = hdr_priv(hdr);
	struct fio_sha512_ctx sha512_ctx = {
		.buf = vh->sha512,
	};

	fio_sha512_init(&sha512_ctx);
	fio_sha512_update(&sha512_ctx, p, len);
}

static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha256 *vh = hdr_priv(hdr);
	struct fio_sha256_ctx sha256_ctx = {
		.buf = vh->sha256,
	};

	fio_sha256_init(&sha256_ctx);
	fio_sha256_update(&sha256_ctx, p, len);
}

static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha1 *vh = hdr_priv(hdr);
	struct fio_sha1_ctx sha1_ctx = {
		.H = vh->sha1,
	};

	fio_sha1_init(&sha1_ctx);
	fio_sha1_update(&sha1_ctx, p, len);
}

static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc7 *vh = hdr_priv(hdr);

	vh->crc7 = fio_crc7(p, len);
}

static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc16 *vh = hdr_priv(hdr);

	vh->crc16 = fio_crc16(p, len);
}

static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32(p, len);
}

static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32c(p, len);
}

static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc64 *vh = hdr_priv(hdr);

	vh->crc64 = fio_crc64(p, len);
}

static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_md5 *vh = hdr_priv(hdr);
	struct fio_md5_ctx md5_ctx = {
		.hash = (uint32_t *) vh->md5_digest,
	};

	fio_md5_init(&md5_ctx);
	fio_md5_update(&md5_ctx, p, len);
}

static void populate_hdr(struct thread_data *td, struct io_u *io_u,
			 struct verify_header *hdr, unsigned int header_num,
			 unsigned int header_len)
{
	unsigned int data_len;
	void *data, *p;

	p = (void *) hdr;

	hdr->magic = FIO_HDR_MAGIC;
	hdr->verify_type = td->o.verify;
	hdr->len = header_len;
	hdr->rand_seed = io_u->rand_seed;
	hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));

	data_len = header_len - hdr_size(hdr);

	data = p + hdr_size(hdr);
	switch (td->o.verify) {
	case VERIFY_MD5:
		dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_md5(hdr, data, data_len);
		break;
	case VERIFY_CRC64:
		dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc64(hdr, data, data_len);
		break;
	case VERIFY_CRC32C:
	case VERIFY_CRC32C_INTEL:
		dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32c(hdr, data, data_len);
		break;
	case VERIFY_CRC32:
		dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32(hdr, data, data_len);
		break;
	case VERIFY_CRC16:
		dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc16(hdr, data, data_len);
		break;
	case VERIFY_CRC7:
		dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc7(hdr, data, data_len);
		break;
	case VERIFY_SHA256:
		dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha256(hdr, data, data_len);
		break;
	case VERIFY_SHA512:
		dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha512(hdr, data, data_len);
		break;
	case VERIFY_META:
		dprint(FD_VERIFY, "fill meta io_u %p, len %u\n",
						io_u, hdr->len);
		fill_meta(hdr, td, io_u, header_num);
		break;
	case VERIFY_SHA1:
		dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha1(hdr, data, data_len);
		break;
	case VERIFY_PATTERN:
		/* nothing to do here */
		break;
	default:
		log_err("fio: bad verify type: %d\n", td->o.verify);
		assert(0);
	}
	if (td->o.verify_offset)
		memswp(p, p + td->o.verify_offset, hdr_size(hdr));
}

/*
 * fill body of io_u->buf with random data and add a header with the
 * checksum of choice
 */
void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->o.verify == VERIFY_NULL)
		return;

	fill_pattern_headers(td, io_u, 0, 0);
}

int get_next_verify(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = NULL;

	/*
	 * this io_u is from a requeue, we already filled the offsets
	 */
	if (io_u->file)
		return 0;

	if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
		struct rb_node *n = rb_first(&td->io_hist_tree);

		ipo = rb_entry(n, struct io_piece, rb_node);
		rb_erase(n, &td->io_hist_tree);
		assert(ipo->flags & IP_F_ONRB);
		ipo->flags &= ~IP_F_ONRB;
	} else if (!flist_empty(&td->io_hist_list)) {
		ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
		flist_del(&ipo->list);
		assert(ipo->flags & IP_F_ONLIST);
		ipo->flags &= ~IP_F_ONLIST;
	}

	if (ipo) {
		td->io_hist_len--;

		io_u->offset = ipo->offset;
		io_u->buflen = ipo->len;
		io_u->file = ipo->file;
		io_u->flags |= IO_U_F_VER_LIST;

		if (ipo->flags & IP_F_TRIMMED)
			io_u->flags |= IO_U_F_TRIMMED;

		if (!fio_file_open(io_u->file)) {
			int r = td_io_open_file(td, io_u->file);

			if (r) {
				dprint(FD_VERIFY, "failed file %s open\n",
						io_u->file->file_name);
				return 1;
			}
		}

		get_file(ipo->file);
		assert(fio_file_open(io_u->file));
		io_u->ddir = DDIR_READ;
		io_u->xfer_buf = io_u->buf;
		io_u->xfer_buflen = io_u->buflen;

		remove_trim_entry(td, ipo);
		free(ipo);
		dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);
		return 0;
	}

	dprint(FD_VERIFY, "get_next_verify: empty\n");
	return 1;
}

void fio_verify_init(struct thread_data *td)
{
	if (td->o.verify == VERIFY_CRC32C_INTEL ||
	    td->o.verify == VERIFY_CRC32C) {
		crc32c_intel_probe();
	}
}

static void *verify_async_thread(void *data)
{
	struct thread_data *td = data;
	struct io_u *io_u;
	int ret = 0;

	if (td->o.verify_cpumask_set &&
	    fio_setaffinity(td->pid, td->o.verify_cpumask)) {
		log_err("fio: failed setting verify thread affinity\n");
		goto done;
	}

	do {
		FLIST_HEAD(list);

		read_barrier();
		if (td->verify_thread_exit)
			break;

		pthread_mutex_lock(&td->io_u_lock);

		while (flist_empty(&td->verify_list) &&
		       !td->verify_thread_exit) {
			ret = pthread_cond_wait(&td->verify_cond,
							&td->io_u_lock);
			if (ret) {
				pthread_mutex_unlock(&td->io_u_lock);
				break;
			}
		}

		flist_splice_init(&td->verify_list, &list);
		pthread_mutex_unlock(&td->io_u_lock);

		if (flist_empty(&list))
			continue;

		while (!flist_empty(&list)) {
			io_u = flist_entry(list.next, struct io_u, verify_list);
			flist_del(&io_u->verify_list);

			ret = verify_io_u(td, io_u);
			put_io_u(td, io_u);
			if (!ret)
				continue;
			if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
				update_error_count(td, ret);
				td_clear_error(td);
				ret = 0;
			}
		}
	} while (!ret);

	if (ret) {
		td_verror(td, ret, "async_verify");
		if (td->o.verify_fatal)
			td->terminate = 1;
	}

done:
	pthread_mutex_lock(&td->io_u_lock);
	td->nr_verify_threads--;
	pthread_mutex_unlock(&td->io_u_lock);

	pthread_cond_signal(&td->free_cond);
	return NULL;
}

int verify_async_init(struct thread_data *td)
{
	int i, ret;
	pthread_attr_t attr;

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);

	td->verify_thread_exit = 0;

	td->verify_threads = malloc(sizeof(pthread_t) * td->o.verify_async);
	for (i = 0; i < td->o.verify_async; i++) {
		ret = pthread_create(&td->verify_threads[i], &attr,
					verify_async_thread, td);
		if (ret) {
			log_err("fio: async verify creation failed: %s\n",
					strerror(ret));
			break;
		}
		ret = pthread_detach(td->verify_threads[i]);
		if (ret) {
			log_err("fio: async verify thread detach failed: %s\n",
					strerror(ret));
			break;
		}
		td->nr_verify_threads++;
	}

	pthread_attr_destroy(&attr);

	if (i != td->o.verify_async) {
		log_err("fio: only %d verify threads started, exiting\n", i);
		td->verify_thread_exit = 1;
		write_barrier();
		pthread_cond_broadcast(&td->verify_cond);
		return 1;
	}

	return 0;
}