/*
 * log_read_op -- perform read operation
 */
static int
log_read_op(struct benchmark *bench, struct operation_info *info)
{
	struct log_bench *lb = (struct log_bench *)pmembench_get_priv(bench);
	assert(lb);

	struct log_worker_info *worker_info =
		(struct log_worker_info *)info->worker->priv;
	assert(worker_info);

	worker_info->buf_ptr = 0;

	size_t chunk_size = lb->args->rand
		? worker_info->rand_sizes[info->index]
		: lb->args->el_size;

	if (!lb->args->fileio) {
		pmemlog_walk(lb->plp, chunk_size, log_process_data,
			worker_info);
		return 0;
	}

	int ret;
	while ((ret = fileio_read(lb->fd, chunk_size, worker_info)) == 1)
		;

	return ret;
}
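/*
 * fileio_read -- (illustrative sketch) read a single chunk in file-I/O mode
 *
 * log_read_op() above drains the file by calling fileio_read() until it
 * stops returning 1, but the helper itself is not part of this section.
 * This is a minimal plausible version, assuming only the buf, buf_ptr and
 * buf_size members of struct log_worker_info that log_read_op() already
 * uses; it is not the benchmark's actual implementation. Returns 1 while
 * full chunks keep arriving, 0 on EOF or buffer exhaustion, -1 on error.
 */
static int
fileio_read(int fd, size_t len, struct log_worker_info *worker_info)
{
	/* stop before overrunning the worker's read buffer */
	if (worker_info->buf_ptr + len > worker_info->buf_size)
		return 0;

	ssize_t ret = read(fd, &worker_info->buf[worker_info->buf_ptr], len);
	if (ret < 0)
		return -1;

	worker_info->buf_ptr += (size_t)ret;
	return (size_t)ret == len ? 1 : 0;
}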
/*
 * blk_operation -- main operation for the blk_read and blk_write benchmarks
 */
static int
blk_operation(struct benchmark *bench, struct operation_info *info)
{
	struct blk_bench *bb = (struct blk_bench *)pmembench_get_priv(bench);
	struct blk_worker *bworker = (struct blk_worker *)info->worker->priv;

	off_t off = bworker->blocks[info->index];
	return bb->worker(bb, info->args, bworker, off);
}
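/*
 * Illustrative sketch: blk_operation() only dispatches through bb->worker,
 * which the (not shown) init code points at a read or write routine.
 * Assuming bb->pbp is the open PMEMblkpool and bworker->buff the per-worker
 * block buffer used elsewhere in this section, a pmemblk-backed pair could
 * look like the functions below; these are not the benchmark's actual
 * workers.
 */
static int
blk_read_op(struct blk_bench *bb, struct benchmark_args *args,
	    struct blk_worker *bworker, off_t off)
{
	if (pmemblk_read(bb->pbp, bworker->buff, off) < 0) {
		perror("pmemblk_read");
		return -1;
	}
	return 0;
}

static int
blk_write_op(struct blk_bench *bb, struct benchmark_args *args,
	     struct blk_worker *bworker, off_t off)
{
	if (pmemblk_write(bb->pbp, bworker->buff, off) < 0) {
		perror("pmemblk_write");
		return -1;
	}
	return 0;
}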
/*
 * memset_exit -- benchmark cleanup function
 */
static int
memset_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *mb = (struct memset_bench *)pmembench_get_priv(bench);
	pmem_unmap(mb->pmem_addr, mb->fsize);
	free(mb->offsets);
	free(mb);
	return 0;
}
/*
 * pmem_memcpy_exit -- benchmark cleanup
 */
static int
pmem_memcpy_exit(struct benchmark *bench, struct benchmark_args *args)
{
	struct pmem_bench *pmb = (struct pmem_bench *)pmembench_get_priv(bench);
	munmap(pmb->pmem_addr, pmb->fsize);
	free(pmb->buf);
	free(pmb->rand_offsets);
	free(pmb);
	return 0;
}
/*
 * blk_init_worker -- initialize worker
 */
static int
blk_init_worker(struct benchmark *bench, struct benchmark_args *args,
		struct worker_info *worker)
{
	struct blk_worker *bworker =
		(struct blk_worker *)malloc(sizeof(*bworker));

	if (!bworker) {
		perror("malloc");
		return -1;
	}

	struct blk_bench *bb = (struct blk_bench *)pmembench_get_priv(bench);
	struct blk_args *bargs = (struct blk_args *)args->opts;

	bworker->seed = os_rand_r(&bargs->seed);

	bworker->buff = (unsigned char *)malloc(args->dsize);
	if (!bworker->buff) {
		perror("malloc");
		goto err_buff;
	}

	/* fill buffer with a seed-derived byte pattern */
	memset(bworker->buff, bworker->seed, args->dsize);

	assert(args->n_ops_per_thread != 0);
	bworker->blocks = (off_t *)malloc(sizeof(*bworker->blocks) *
					  args->n_ops_per_thread);
	if (!bworker->blocks) {
		perror("malloc");
		goto err_blocks;
	}

	if (bargs->rand) {
		for (size_t i = 0; i < args->n_ops_per_thread; i++) {
			bworker->blocks[i] =
				worker->index * bb->blocks_per_thread +
				os_rand_r(&bworker->seed) %
					bb->blocks_per_thread;
		}
	} else {
		for (size_t i = 0; i < args->n_ops_per_thread; i++)
			bworker->blocks[i] = i % bb->blocks_per_thread;
	}

	worker->priv = bworker;
	return 0;

err_blocks:
	free(bworker->buff);
err_buff:
	free(bworker);
	return -1;
}
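/*
 * Illustrative sketch: bb->blocks_per_thread used above has to be set
 * during benchmark init so the per-thread block ranges do not overlap.
 * Assuming the pool is already open, it could be derived roughly like
 * this (the surrounding init function is not shown; the n_threads field
 * name is an assumption here):
 *
 *	size_t nblocks = pmemblk_nblock(bb->pbp);
 *	bb->blocks_per_thread = nblocks / args->n_threads;
 */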
/*
 * pobj_free_worker -- worker exit function
 */
static void
pobj_free_worker(struct benchmark *bench, struct benchmark_args *args,
		 struct worker_info *worker)
{
	auto *pw = (struct pobj_worker *)worker->priv;
	auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench);

	for (size_t i = 0; i < bench_priv->args_priv->n_objs; i++)
		pmemobj_free(&pw->oids[i]);

	free(pw->oids);
	free(pw);
}
/*
 * pobj_open_op -- main operation of the obj_open benchmark
 */
static int
pobj_open_op(struct benchmark *bench, struct operation_info *info)
{
	auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench);
	size_t idx = bench_priv->pool(info->worker->index);

	pmemobj_close(bench_priv->pop[idx]);

	bench_priv->pop[idx] = pmemobj_open(bench_priv->sets[idx], LAYOUT_NAME);
	if (bench_priv->pop[idx] == nullptr)
		return -1;

	return 0;
}
/*
 * log_exit -- benchmark cleanup
 */
static int
log_exit(struct benchmark *bench, struct benchmark_args *args)
{
	struct log_bench *lb = (struct log_bench *)pmembench_get_priv(bench);

	if (!lb->args->fileio)
		pmemlog_close(lb->plp);
	else
		close(lb->fd);

	free(lb);
	return 0;
}
/*
 * memset_op -- actual benchmark operation. It can have one of the four
 * functions assigned:
 *	libc_memset,
 *	libc_memset_persist,
 *	libpmem_memset_nodrain,
 *	libpmem_memset_persist.
 */
static int
memset_op(struct benchmark *bench, struct operation_info *info)
{
	auto *mb = (struct memset_bench *)pmembench_get_priv(bench);

	assert(info->index < mb->n_offsets);

	size_t idx = info->worker->index * info->args->n_ops_per_thread +
		info->index;
	void *dest =
		(char *)mb->pmem_addr + mb->offsets[idx] + mb->pargs->dest_off;
	int c = mb->const_b;
	size_t len = mb->pargs->chunk_size;

	mb->func_op(dest, c, len);

	return 0;
}
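/*
 * Illustrative sketch: the four variants named above differ only in how
 * (and whether) the written range is flushed. pmem_memset_nodrain() and
 * pmem_memset_persist() are real libpmem entry points with a memset-like
 * signature, so func_op can point at them directly; the two libc-based
 * variants would be thin wrappers along these lines (wrapper names match
 * the comment above, bodies are an assumption, not the original code).
 */
static void *
libc_memset(void *dest, int c, size_t len)
{
	return memset(dest, c, len); /* volatile baseline, no flush */
}

static void *
libc_memset_persist(void *dest, int c, size_t len)
{
	memset(dest, c, len);
	pmem_persist(dest, len); /* flush the written range to persistence */
	return dest;
}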
/*
 * pobj_exit -- common part of the benchmarks' exit functions
 */
static int
pobj_exit(struct benchmark *bench, struct benchmark_args *args)
{
	size_t i;
	auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench);

	if (bench_priv->n_pools > 1) {
		for (i = 0; i < bench_priv->n_pools; i++) {
			pmemobj_close(bench_priv->pop[i]);
			free((char *)bench_priv->sets[i]);
		}
	} else {
		pmemobj_close(bench_priv->pop[0]);
	}

	free(bench_priv->sets);
	free(bench_priv->pop);
	free(bench_priv->rand_sizes);
	free(bench_priv->random_types);
	free(bench_priv);
	return 0;
}
/*
 * log_appendv -- performs pmemlog_appendv operation
 */
static int
log_appendv(struct benchmark *bench, struct operation_info *info)
{
	struct log_bench *lb = (struct log_bench *)pmembench_get_priv(bench);
	assert(lb);

	struct log_worker_info *worker_info =
		(struct log_worker_info *)info->worker->priv;
	assert(worker_info);

	struct iovec *iov =
		&worker_info->iov[info->index * lb->args->vec_size];

	if (pmemlog_appendv(lb->plp, iov, lb->args->vec_size) < 0) {
		perror("pmemlog_appendv");
		return -1;
	}

	return 0;
}
/*
 * log_append -- performs pmemlog_append operation
 */
static int
log_append(struct benchmark *bench, struct operation_info *info)
{
	struct log_bench *lb = (struct log_bench *)pmembench_get_priv(bench);
	assert(lb);

	struct log_worker_info *worker_info =
		(struct log_worker_info *)info->worker->priv;
	assert(worker_info);

	size_t size = lb->args->rand ? worker_info->rand_sizes[info->index]
				     : lb->args->el_size;

	if (pmemlog_append(lb->plp, worker_info->buf, size) < 0) {
		perror("pmemlog_append");
		return -1;
	}

	return 0;
}
/*
 * fileio_append -- performs fileio append operation
 */
static int
fileio_append(struct benchmark *bench, struct operation_info *info)
{
	struct log_bench *lb = (struct log_bench *)pmembench_get_priv(bench);
	assert(lb);

	struct log_worker_info *worker_info =
		(struct log_worker_info *)info->worker->priv;
	assert(worker_info);

	size_t size = lb->args->rand ? worker_info->rand_sizes[info->index]
				     : lb->args->el_size;

	if (write(lb->fd, worker_info->buf, (unsigned)size) !=
	    (ssize_t)size) {
		perror("write");
		return -1;
	}

	return 0;
}
/*
 * fileio_appendv -- performs fileio appendv operation
 */
static int
fileio_appendv(struct benchmark *bench, struct operation_info *info)
{
	struct log_bench *lb = (struct log_bench *)pmembench_get_priv(bench);
	assert(lb != NULL);

	struct log_worker_info *worker_info =
		(struct log_worker_info *)info->worker->priv;
	assert(worker_info);

	struct iovec *iov =
		&worker_info->iov[info->index * lb->args->vec_size];
	size_t vec_size = worker_info->vec_sizes[info->index];

	if (writev(lb->fd, iov, lb->args->vec_size) != (ssize_t)vec_size) {
		perror("writev");
		return -1;
	}

	return 0;
}
/*
 * pobj_direct_op -- main operation of the obj_direct benchmark
 */
static int
pobj_direct_op(struct benchmark *bench, struct operation_info *info)
{
	auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench);
	auto *pw = (struct pobj_worker *)info->worker->priv;
	size_t idx = bench_priv->obj(info->index);

	/* query an invalid uuid:off pair to invalidate the cache */
	PMEMoid bad = {1, 1};

#define OBJ_DIRECT_NITER 1024
	/*
	 * As we measure a very fast operation, we need a loop inside the
	 * test harness.
	 */
	for (int i = 0; i < OBJ_DIRECT_NITER; i++) {
		if (pmemobj_direct(pw->oids[idx]) == nullptr)
			return -1;
		if (pmemobj_direct(bad) != nullptr)
			return -1;
	}
	return 0;
#undef OBJ_DIRECT_NITER
}
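/*
 * Illustrative sketch: an operation like pobj_direct_op() is hooked into
 * pmembench through a benchmark_info descriptor. The shape below follows
 * the usual pmembench registration pattern; the init/exit symbols are
 * hypothetical names for pieces not shown in this section.
 *
 *	static struct benchmark_info obj_direct_info = {
 *		.name = "obj_direct",
 *		.brief = "pmemobj_direct() benchmark",
 *		.init = pobj_direct_init,	// not shown here
 *		.exit = pobj_direct_exit,	// not shown here
 *		.init_worker = pobj_init_worker,
 *		.free_worker = pobj_free_worker,
 *		.operation = pobj_direct_op,
 *		...
 *	};
 *	REGISTER_BENCHMARK(obj_direct_info);
 */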
/*
 * pobj_init_worker -- worker initialization
 */
static int
pobj_init_worker(struct benchmark *bench, struct benchmark_args *args,
		 struct worker_info *worker)
{
	size_t i, idx = worker->index;
	auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench);
	auto *pw = (struct pobj_worker *)calloc(1, sizeof(struct pobj_worker));

	if (pw == nullptr) {
		perror("calloc");
		return -1;
	}

	worker->priv = pw;

	pw->oids = (PMEMoid *)calloc(bench_priv->args_priv->n_objs,
				     sizeof(PMEMoid));
	if (pw->oids == nullptr) {
		free(pw);
		perror("calloc");
		return -1;
	}

	PMEMobjpool *pop = bench_priv->pop[bench_priv->pool(idx)];

	for (i = 0; i < bench_priv->args_priv->n_objs; i++) {
		size_t size = bench_priv->fn_size(bench_priv, i);
		size_t type = bench_priv->fn_type_num(bench_priv, idx, i);
		if (pmemobj_alloc(pop, &pw->oids[i], size, type, nullptr,
				  nullptr) != 0) {
			perror("pmemobj_alloc");
			goto out;
		}
	}

	return 0;

out:
	for (; i > 0; i--)
		pmemobj_free(&pw->oids[i - 1]);

	free(pw->oids);
	free(pw);
	return -1;
}
/*
 * blk_exit -- benchmark deinitialization function
 */
static int
blk_exit(struct benchmark *bench, struct benchmark_args *args)
{
	struct blk_bench *bb = (struct blk_bench *)pmembench_get_priv(bench);
	struct blk_args *ba = (struct blk_args *)args->opts;

	if (ba->file_io) {
		os_close(bb->fd);
	} else {
		pmemblk_close(bb->pbp);
		int result = pmemblk_check(args->fname, args->dsize);
		if (result < 0) {
			perror("pmemblk_check error");
			return -1;
		} else if (result == 0) {
			/* errno is not meaningful here, so no perror() */
			fprintf(stderr, "pmemblk_check: not consistent\n");
			return -1;
		}
	}

	free(bb);
	return 0;
}
/*
 * pmem_memcpy_operation -- actual benchmark operation
 *
 * Depending on the memcpy flag "-m", the tested operation will be memcpy()
 * or pmem_memcpy_persist().
 */
static int
pmem_memcpy_operation(struct benchmark *bench, struct operation_info *info)
{
	struct pmem_bench *pmb = (struct pmem_bench *)pmembench_get_priv(bench);

	uint64_t src_index =
		info->args->n_ops_per_thread * info->worker->index +
		pmb->func_src(pmb, info->index);

	uint64_t dest_index =
		info->args->n_ops_per_thread * info->worker->index +
		pmb->func_dest(pmb, info->index);

	/* cast to char * so the offset arithmetic is valid in C++ */
	void *source = (char *)pmb->src_addr +
		src_index * pmb->pargs->chunk_size + pmb->pargs->src_off;

	void *dest = (char *)pmb->dest_addr +
		dest_index * pmb->pargs->chunk_size + pmb->pargs->dest_off;

	size_t len = pmb->pargs->chunk_size;

	pmb->func_op(dest, source, len);

	return 0;
}
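/*
 * Illustrative sketch: memcpy() and pmem_memcpy_persist() share the
 * (dest, src, len) argument order, so selecting the operation for the
 * "-m" flag can be a single pointer assignment in the (not shown) init
 * function. The pargs->memcpy flag name is an assumption:
 *
 *	pmb->func_op = pmb->pargs->memcpy
 *		? (void *(*)(void *, const void *, size_t))memcpy
 *		: pmem_memcpy_persist;
 */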
/*
 * log_init_worker -- init benchmark worker
 */
static int
log_init_worker(struct benchmark *bench, struct benchmark_args *args,
		struct worker_info *worker)
{
	int ret = 0;
	struct log_bench *lb = (struct log_bench *)pmembench_get_priv(bench);
	size_t i_size, n_vectors;
	assert(lb);

	struct log_worker_info *worker_info = (struct log_worker_info *)malloc(
		sizeof(struct log_worker_info));
	if (!worker_info) {
		perror("malloc");
		return -1;
	}

	/* allocate buffer for append / read */
	worker_info->buf_size = lb->args->el_size * lb->args->vec_size;
	worker_info->buf = (char *)malloc(worker_info->buf_size);
	if (!worker_info->buf) {
		perror("malloc");
		ret = -1;
		goto err_free_worker_info;
	}

	/*
	 * Each operation gets its own vector of iovecs; in random mode
	 * the element sizes vary per vector, otherwise every element
	 * has the same size.
	 */
	n_vectors = args->n_ops_per_thread;
	worker_info->iov = (struct iovec *)malloc(
		n_vectors * lb->args->vec_size * sizeof(struct iovec));
	if (!worker_info->iov) {
		perror("malloc");
		ret = -1;
		goto err_free_buf;
	}

	if (lb->args->rand) {
		/* each thread has its own random seed */
		worker_info->seed = (unsigned)rand_r(&lb->seed);

		/* each vector element has its own random size */
		uint64_t n_sizes = args->n_ops_per_thread * lb->args->vec_size;
		worker_info->rand_sizes = (size_t *)malloc(
			n_sizes * sizeof(*worker_info->rand_sizes));
		if (!worker_info->rand_sizes) {
			perror("malloc");
			ret = -1;
			goto err_free_iov;
		}

		/*
		 * generate append sizes -- two rand_r() outputs are glued
		 * together because a single one yields at most 31 bits
		 */
		for (uint64_t i = 0; i < n_sizes; i++) {
			uint32_t hr = (uint32_t)rand_r(&worker_info->seed);
			uint32_t lr = (uint32_t)rand_r(&worker_info->seed);
			uint64_t r64 = (uint64_t)hr << 32 | lr;
			size_t width = lb->args->el_size - lb->args->min_size;
			worker_info->rand_sizes[i] =
				r64 % width + lb->args->min_size;
		}
	} else {
		worker_info->rand_sizes = NULL;
	}

	worker_info->vec_sizes = (size_t *)calloc(
		args->n_ops_per_thread, sizeof(*worker_info->vec_sizes));
	if (!worker_info->vec_sizes) {
		perror("calloc");
		ret = -1;
		goto err_free_rand_sizes;
	}

	/* fill up the io vectors */
	i_size = 0;
	for (size_t n = 0; n < args->n_ops_per_thread; n++) {
		size_t buf_ptr = 0;
		size_t vec_off = n * lb->args->vec_size;
		for (int i = 0; i < lb->args->vec_size; ++i) {
			size_t el_size = lb->args->rand
				? worker_info->rand_sizes[i_size++]
				: lb->args->el_size;

			worker_info->iov[vec_off + i].iov_base =
				&worker_info->buf[buf_ptr];
			worker_info->iov[vec_off + i].iov_len = el_size;

			worker_info->vec_sizes[n] += el_size;
			buf_ptr += el_size;
		}
	}

	worker->priv = worker_info;
	return 0;

err_free_rand_sizes:
	free(worker_info->rand_sizes);
err_free_iov:
	free(worker_info->iov);
err_free_buf:
	free(worker_info->buf);
err_free_worker_info:
	free(worker_info);

	return ret;
}