/* * Leaves f->fd open on success, caller must close */ static int extend_file(struct thread_data *td, struct fio_file *f) { int r, new_layout = 0, unlink_file = 0, flags; unsigned long long left; unsigned int bs; char *b; if (read_only) { log_err("fio: refusing extend of file due to read-only\n"); return 0; } /* * check if we need to lay the file out complete again. fio * does that for operations involving reads, or for writes * where overwrite is set */ if (td_read(td) || (td_write(td) && td->o.overwrite) || (td_write(td) && td->io_ops->flags & FIO_NOEXTEND)) new_layout = 1; if (td_write(td) && !td->o.overwrite) unlink_file = 1; if (unlink_file || new_layout) { dprint(FD_FILE, "layout unlink %s\n", f->file_name); if ((unlink(f->file_name) < 0) && (errno != ENOENT)) { td_verror(td, errno, "unlink"); return 1; } } flags = O_WRONLY | O_CREAT; if (new_layout) flags |= O_TRUNC; dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags); f->fd = open(f->file_name, flags, 0644); if (f->fd < 0) { td_verror(td, errno, "open"); return 1; } #ifdef CONFIG_POSIX_FALLOCATE if (!td->o.fill_device) { switch (td->o.fallocate_mode) { case FIO_FALLOCATE_NONE: break; case FIO_FALLOCATE_POSIX: dprint(FD_FILE, "posix_fallocate file %s size %llu\n", f->file_name, (unsigned long long) f->real_file_size); r = posix_fallocate(f->fd, 0, f->real_file_size); if (r > 0) { log_err("fio: posix_fallocate fails: %s\n", strerror(r)); } break; #ifdef CONFIG_LINUX_FALLOCATE case FIO_FALLOCATE_KEEP_SIZE: dprint(FD_FILE, "fallocate(FALLOC_FL_KEEP_SIZE) " "file %s size %llu\n", f->file_name, (unsigned long long) f->real_file_size); r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0, f->real_file_size); if (r != 0) td_verror(td, errno, "fallocate"); break; #endif /* CONFIG_LINUX_FALLOCATE */ default: log_err("fio: unknown fallocate mode: %d\n", td->o.fallocate_mode); assert(0); } } #endif /* CONFIG_POSIX_FALLOCATE */ if (!new_layout) goto done; /* * The size will be -1ULL when fill_device is used, so 
don't truncate * or fallocate this file, just write it */ if (!td->o.fill_device) { dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name, (unsigned long long) f->real_file_size); if (ftruncate(f->fd, f->real_file_size) == -1) { td_verror(td, errno, "ftruncate"); goto err; } } b = malloc(td->o.max_bs[DDIR_WRITE]); left = f->real_file_size; while (left && !td->terminate) { bs = td->o.max_bs[DDIR_WRITE]; if (bs > left) bs = left; fill_io_buffer(td, b, bs, bs); r = write(f->fd, b, bs); if (r > 0) { left -= r; continue; } else { if (r < 0) { int __e = errno; if (__e == ENOSPC) { if (td->o.fill_device) break; log_info("fio: ENOSPC on laying out " "file, stopping\n"); break; } td_verror(td, errno, "write"); } else td_verror(td, EIO, "write"); break; } } if (td->terminate) { dprint(FD_FILE, "terminate unlink %s\n", f->file_name); unlink(f->file_name); } else if (td->o.create_fsync) { if (fsync(f->fd) < 0) { td_verror(td, errno, "fsync"); goto err; } } if (td->o.fill_device && !td_write(td)) { fio_file_clear_size_known(f); if (td_io_get_file_size(td, f)) goto err; if (f->io_size > f->real_file_size) f->io_size = f->real_file_size; } free(b); done: return 0; err: close(f->fd); f->fd = -1; return 1; }
/*
 * Called to complete min_events number of io for the async engines.
 *
 * Reaps completions via td_io_getevents(); when min_evts is 0 a zeroed
 * timespec is passed so the reap does not block. On success the reaped
 * io_us are accounted through init_icd()/ios_completed(), and per-direction
 * byte counts are accumulated into 'bytes' (if non-NULL).
 *
 * Returns 0 on success, the (negative) td_io_getevents() error on reap
 * failure, 0 passed through when nothing completed, or -1 if a completed
 * io carried an error.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts, uint64_t *bytes)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;	/* NULL means block until min_evts */
	int ret;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);

	/* zero timeout -> poll, don't block, when no minimum is required */
	if (!min_evts)
		tvp = &ts;

	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(td, &icd, ret);
	ios_completed(td, &icd);
	if (icd.error) {
		td_verror(td, icd.error, "io_u_queued_complete");
		return -1;
	}

	if (bytes) {
		int ddir;

		/* accumulate per-direction completed byte counts */
		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
			bytes[ddir] += icd.bytes_done[ddir];
	}

	return 0;
}

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	if (!td->o.disable_slat) {
		unsigned long slat_time;

		/* submission latency: time between start and actual issue */
		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
	}
}

/*
 * Fill 'buf' (max_bs bytes) according to the job's buffer options:
 * a fixed pattern, zeroes, or random data — optionally arranged in
 * compressible segments when compress_percentage is set.
 */
void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
		    unsigned int max_bs)
{
	if (td->o.buffer_pattern_bytes)
		fill_buffer_pattern(td, buf, max_bs);
	else if (!td->o.zero_buffers) {
		unsigned int perc = td->o.compress_percentage;

		if (perc) {
			/*
			 * Segment size for compressible data: the compress
			 * chunk if set, capped at min_write; fall back to
			 * min_write when the chunk option is 0.
			 */
			unsigned int seg = min_write;

			seg = min(min_write, td->o.compress_chunk);
			if (!seg)
				seg = min_write;

			fill_random_buf_percentage(&td->buf_state, buf, perc,
							seg, max_bs);
		} else
			fill_random_buf(&td->buf_state, buf, max_bs);
	} else
		memset(buf, 0, max_bs);
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
		      unsigned int min_write, unsigned int max_bs)
{
	/* reset cached fill length before regenerating the contents */
	io_u->buf_filled_len = 0;
	fill_io_buffer(td, io_u->buf, min_write, max_bs);
}
int main(int argc, char *argv[]) { int nb_threads = atoi(argv[3]); int blocksize = atoi(argv[2]); int index; char filename[6]; char* mode = argv[1]; int i = 0; double latency, throughput; double avg_latency = 0, avg_throughput = 0, total_throughput = 0; ThreadData thread[nb_threads]; ThreadData empty_loop; pthread_t empty_loop_id; int random_int; int re; srand(time(NULL)); random_int = rand()%(MAX_SIZE-blocksize); /* Fill the buffer where read and write buffer is to be tested */ fill_io_buffer(); /* Detect mode betweem Sequential, Random */ index = detect_mode(mode); printf("\nBLOCKSIZE : %d B\n",blocksize); printf("THREADS : %d",nb_threads); printf("\n=======================================\n\n"); /* Calculating empty loop_size latency*/ empty_loop.blocksize = blocksize/nb_threads; empty_loop.diff = 0; pthread_create(&empty_loop_id,NULL,loop_time,(void *) argv); pthread_join(empty_loop_id, NULL); /* Creating threads */ for(i=0; i < nb_threads; i++){ thread[i].blocksize = blocksize/nb_threads; thread[i].random_int = random_int; thread[i].diff = 0; /* Determining thread to be created (Sequential, Random) */ switch(index){ case 0: re = pthread_create(&(thread[i].thread_id),NULL,read_write_seq_memory,(void *)(&thread[i])); break; case 1: re = pthread_create(&(thread[i].thread_id),NULL,read_write_ran_memory,(void *)(&thread[i])); break; default: return 0; break; } if(re == -1){ printf("Error creating thread %d",i+1); } else{ printf("Thread %d/%d created.\n",i+1,nb_threads); } } printf("---------------------------------------\n\n"); /* Wait for all the threads to complete */ for(i=0; i < nb_threads; i++){ pthread_join(thread[i].thread_id, NULL); } sleep(2); /* Calculating and printing throughput and latency for each thread */ for(i=0; i < nb_threads; i++){ latency = ((thread[i].diff)-empty_loop.diff); //We substract the empty loop latency throughput = ((thread[i].blocksize/1000000.0)/(latency)); avg_latency +=latency; total_throughput += throughput; printf("Thread : 
%d\n", i+1); printf("Blocksize : %d B\n", thread[i].blocksize); printf("Latency : %.5f ms\n",(latency*1000)); printf("Throughput : %.2f MB/s\n\n",throughput); } /* Calculating average throughput and latency */ avg_latency /= nb_threads; avg_throughput = total_throughput/nb_threads; printf("---------------------------------------\n"); printf("Average latency : %.5f ms\n", (avg_latency*1000)); printf("Average throughput : %.5f ms\n", avg_throughput); printf("Total throughput : %.2f MB/s", total_throughput); printf("\n---------------------------------------\n"); return 0; }