Example #1
static int client_rw(int out_fd, int file_fd, int offset)
{
	int loops = client_loops;
	struct timeval start;
	struct stat sb;
	char *buf, *p;
	unsigned long long size;
	unsigned long msecs;

	if (fstat(file_fd, &sb) < 0)
		return error("fstat");

	buf = malloc(splice_size);
	if (!buf)
		return error("malloc");

	gettimeofday(&start, NULL);
again:
	if (lseek(file_fd, 0, SEEK_SET) < 0)
		return error("lseek");

	size = sb.st_size;
	while (size) {
		int this_len = min(size, (unsigned long long) splice_size);
		int ret = read(file_fd, buf, this_len);

		if (ret <= 0)
			return error("read");

		size -= ret;
		p = buf;
		while (ret) {
			/* resume from the unwritten tail after a short write */
			int written = write(out_fd, p, ret);

			if (written < 0)
				return error("write");

			p += written;
			ret -= written;
		}
	}

	loops--;

	if ((mtime_since_now(&start) < max_client_run * 1000) && loops)
		goto again;

	free(buf);
	size = sb.st_size >> 10;
	size *= (client_loops - loops);
	msecs = mtime_since_now(&start);
	fprintf(stdout, "Client%d (rw): %Lu MiB/sec (%LuMiB in %lu msecs)\n", offset, size / (unsigned long long) msecs, size >> 10, msecs);
	return 0;
}
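Every listing here times its loop with mtime_since_now(), which none of the examples define. A minimal sketch, assuming the helper simply returns the milliseconds elapsed since the timeval that gettimeofday() captured at the start of the run:

#include <sys/time.h>

/* assumed helper: milliseconds elapsed since *s was filled in */
static unsigned long mtime_since_now(struct timeval *s)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return (now.tv_sec - s->tv_sec) * 1000UL +
		(now.tv_usec - s->tv_usec) / 1000;
}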
Example #2
static int client_mmap(int out_fd, int file_fd, int offset)
{
	int loops = client_loops;
	struct timeval start;
	struct stat sb;
	void *mmap_area;
	char *buf;
	unsigned long long size;
	unsigned long msecs;

	if (fstat(file_fd, &sb) < 0)
		return error("fstat");

	mmap_area = mmap(NULL, sb.st_size, PROT_READ, MAP_SHARED, file_fd, 0);
	if (mmap_area == MAP_FAILED)
		return error("mmap");

	if (madvise(mmap_area, sb.st_size, MADV_WILLNEED) < 0)
		return error("madvise");

	gettimeofday(&start, NULL);
again:
	buf = mmap_area;
	size = sb.st_size;
	while (size) {
		int this_len = min(size, (unsigned long long) splice_size);
		int ret = write(out_fd, buf, this_len);

		if (ret < 0)
			return error("write");

		buf += ret;
		size -= ret;
	}

	loops--;

	if ((mtime_since_now(&start) < max_client_run * 1000) && loops)
		goto again;

	size = sb.st_size >> 10;
	size *= (client_loops - loops);
	msecs = mtime_since_now(&start);
	fprintf(stdout, "Client%d (mmap): %Lu MiB/sec (%LuMiB in %lu msecs)\n", offset, size / (unsigned long long) msecs, size >> 10, msecs);
	munmap(mmap_area, sb.st_size);
	return 0;
}
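The madvise(MADV_WILLNEED) call asks the kernel to start readahead on the mapping up front, so the write() loop is less likely to stall on major page faults. Note that this variant needs no inner retry loop: a short write() leaves size nonzero and advances buf, so the next outer iteration resumes exactly at the unwritten tail.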
Example #3
static int client_splice_loop(int out_fd, int fd, int *pfd, int offset)
{
	struct timeval start;
	unsigned long long size;
	unsigned long msecs;
	struct stat sb;
	int loops = client_loops;
	loff_t off;

	if (fstat(fd, &sb) < 0)
		return error("fstat");

	gettimeofday(&start, NULL);
again:
	size = sb.st_size;
	off = 0;

	do {
		int ret = ssplice(fd, &off, pfd[1], NULL, min(size, (unsigned long long) splice_size), 0);

		if (ret <= 0)
			return error("splice-in");

		size -= ret;
		while (ret > 0) {
			/* like MSG_MORE on sockets: hint that more data follows
			 * until the file is fully drained */
			int flags = size ? SPLICE_F_MORE : 0;
			int written = ssplice(pfd[0], NULL, out_fd, NULL, ret, flags);

			if (written <= 0)
				return error("splice-out");

			ret -= written;
		}
	} while (size);

	loops--;

	if ((mtime_since_now(&start) < max_client_run * 1000) && loops)
		goto again;

	size = sb.st_size >> 10;
	size *= (client_loops - loops);
	msecs = mtime_since_now(&start);
	fprintf(stdout, "Client%d (splice): %Lu MiB/sec (%LuMiB in %lu msecs)\n", offset, size / (unsigned long long) msecs, size >> 10, msecs);
	return 0;
}
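ssplice() is not a libc call; these tests predate a glibc wrapper for splice(2), so the suite carries its own. A plausible sketch (the wrapper body and the caller-side pipe setup below are assumptions, not shown in the original):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* assumed thin wrapper over the raw splice(2) syscall */
static int ssplice(int fd_in, loff_t *off_in, int fd_out, loff_t *off_out,
		   size_t len, unsigned int flags)
{
	return syscall(__NR_splice, fd_in, off_in, fd_out, off_out, len, flags);
}

The pfd argument is the pipe that stages data between the two splice calls, so a caller would do something like:

	int pfd[2];

	if (pipe(pfd) < 0)
		return error("pipe");
	client_splice_loop(out_fd, file_fd, pfd, 0);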
Example #4
/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static int thread_eta(struct thread_data *td)
{
    unsigned long long bytes_total, bytes_done;
    unsigned long eta_sec = 0;
    unsigned long elapsed;
    uint64_t timeout;

    elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
    timeout = td->o.timeout / 1000000UL;

    bytes_total = td->total_io_size;

    if (td->o.fill_device && td->o.size == -1ULL) {
        if (!td->fill_device_size || td->fill_device_size == -1ULL)
            return 0;

        bytes_total = td->fill_device_size;
    }

    if (td->o.zone_size && td->o.zone_skip && bytes_total) {
        unsigned int nr_zones;
        uint64_t zone_bytes;

        zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
        nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
        bytes_total -= nr_zones * td->o.zone_skip;
    }

    /*
     * If writing and verifying afterwards, bytes_total will be twice the
     * size. In a mixed workload, the verify phase will be the size of the
     * first-stage writes.
     */
    if (td->o.do_verify && td->o.verify && td_write(td)) {
        if (td_rw(td)) {
            unsigned int perc = 50;

            if (td->o.rwmix[DDIR_WRITE])
                perc = td->o.rwmix[DDIR_WRITE];

            bytes_total += (bytes_total * perc) / 100;
        } else
            bytes_total <<= 1;
    }

    if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
        double perc, perc_t;

        bytes_done = ddir_rw_sum(td->io_bytes);

        if (bytes_total) {
            perc = (double) bytes_done / (double) bytes_total;
            if (perc > 1.0)
                perc = 1.0;
        } else
            perc = 0.0;

        if (td->o.time_based) {
            if (timeout) {
                perc_t = (double) elapsed / (double) timeout;
                if (perc_t < perc)
                    perc = perc_t;
            } else {
                /*
                 * Will never hit, we can't have time_based
                 * without a timeout set.
                 */
                perc = 0.0;
            }
        }

        /* guard the extrapolation: perc can be 0.0 when nothing is measurable */
        if (perc == 0.0)
            eta_sec = timeout;
        else
            eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;

        if (td->o.timeout &&
                eta_sec > (timeout + done_secs - elapsed))
            eta_sec = timeout + done_secs - elapsed;
    } else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
               || td->runstate == TD_INITIALIZED
               || td->runstate == TD_SETTING_UP
               || td->runstate == TD_RAMP
               || td->runstate == TD_PRE_READING) {
        int t_eta = 0, r_eta = 0;
        unsigned long long rate_bytes;

        /*
         * We can only guess - assume it'll run the full timeout
         * if given, otherwise assume it'll run at the specified rate.
         */
        if (td->o.timeout) {
            uint64_t timeout = td->o.timeout;
            uint64_t start_delay = td->o.start_delay;
            uint64_t ramp_time = td->o.ramp_time;

            t_eta = timeout + start_delay + ramp_time;
            t_eta /= 1000000ULL;

            if (in_ramp_time(td)) {
                unsigned long ramp_left;

                ramp_left = mtime_since_now(&td->epoch);
                ramp_left = (ramp_left + 999) / 1000;
                if (ramp_left <= t_eta)
                    t_eta -= ramp_left;
            }
        }
        rate_bytes = ddir_rw_sum(td->o.rate);
        if (rate_bytes) {
            r_eta = (bytes_total / 1024) / rate_bytes;
            r_eta += (td->o.start_delay / 1000000ULL);
        }

        if (r_eta && t_eta)
            eta_sec = min(r_eta, t_eta);
        else if (r_eta)
            eta_sec = r_eta;
        else if (t_eta)
            eta_sec = t_eta;
        else
            eta_sec = 0;
    } else {
        /*
         * thread is already done or waiting for fsync
         */
        eta_sec = 0;
    }

    return eta_sec;
}
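The extrapolation itself is straightforward: if a fraction perc of the work is done after elapsed seconds, the projected total runtime is elapsed / perc, so the remainder is elapsed * (1.0 / perc) - elapsed. For example, 30 seconds in at 25% done projects a 120-second run, leaving an ETA of 90 seconds; the clamp that follows then caps this against any configured timeout.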
Example #5
File: eta.c Project: martin21/fio
/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static int thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;

	bytes_total = td->total_io_size;

	/*
	 * if writing, bytes_total will be twice the size. If mixing,
	 * assume a 50/50 split and thus bytes_total will be 50% larger.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td))
			bytes_total = bytes_total * 3 / 2;
		else
			bytes_total <<= 1;
	}

	/* avoid dividing by zero when zone_skip < zone_size */
	if (td->o.zone_size && td->o.zone_skip >= td->o.zone_size)
		bytes_total /= (td->o.zone_skip / td->o.zone_size);

	if (td->o.fill_device && td->o.size == -1ULL)
		return 0;

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

		bytes_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE];
		if (bytes_total) {
			perc = (double) bytes_done / (double) bytes_total;
			if (perc > 1.0)
				perc = 1.0;
		} else
			perc = 0.0;

		if (td->o.time_based) {
			perc_t = (double) elapsed / (double) td->o.timeout;
			if (perc_t < perc)
				perc = perc_t;
		}

		if (perc == 0.0)
			eta_sec = td->o.timeout;
		else
			eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;

		if (td->o.timeout &&
		    eta_sec > (td->o.timeout + done_secs - elapsed))
			eta_sec = td->o.timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int t_eta = 0, r_eta = 0;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			t_eta = td->o.timeout + td->o.start_delay;

			if (in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->start);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}
		if (td->o.rate[0] || td->o.rate[1]) {
			r_eta = (bytes_total / 1024) / (td->o.rate[0] + td->o.rate[1]);
			r_eta += td->o.start_delay;
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}
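This older revision from martin21/fio shows the same estimator before several hardening passes visible in the previous example: td->o.timeout and td->o.start_delay are still whole seconds here (the newer code stores microseconds and scales by 1000000), the zone accounting divides bytes_total instead of subtracting the skipped bytes per zone, and ramp time is measured from td->start rather than td->epoch.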
Example #6
unsigned long time_since_now(struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}
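Note the integer division: sub-second remainders are truncated, so callers get whole elapsed seconds.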
Example #7
unsigned long mtime_since_genesis(void)
{
	return mtime_since_now(&genesis);
}
Example #8
uint64_t mtime_since_genesis(void)
{
	return mtime_since_now(&genesis);
}
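Examples #7 and #8 are the same helper from different fio vintages; the return type was merely widened from unsigned long to uint64_t. Both assume a file-scope genesis timeval captured once at startup, along these lines (the initializer's name is an assumption):

#include <sys/time.h>

static struct timeval genesis;

/* call once at program start so mtime_since_genesis() has a reference point */
static void set_genesis_time(void)
{
	gettimeofday(&genesis, NULL);
}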