//------------------------------------------------
// Runs in every thread of every read queue, pops
// readreq objects, does the read and reports the
// read transaction duration.
//
static void* run_reads(void* pv_req_queue)
{
    cf_queue* p_req_queue = (cf_queue*)pv_req_queue;
    readreq* p_readreq;

    while (g_running) {
        if (cf_queue_pop(p_req_queue, (void*)&p_readreq, 100) !=
                CF_QUEUE_OK) {
            continue;
        }

        if (g_use_valloc) {
            uint8_t* p_buffer = cf_valloc(p_readreq->size);

            if (p_buffer) {
                read_and_report(p_readreq, p_buffer);
                free(p_buffer);
            }
            else {
                fprintf(stdout, "ERROR: read buffer cf_valloc()\n");
            }
        }
        else {
            // Over-allocate so the aligned pointer still has size bytes.
            uint8_t stack_buffer[p_readreq->size + 4096];
            uint8_t* p_buffer = align_4096(stack_buffer);

            read_and_report(p_readreq, p_buffer);
        }

        free(p_readreq);
        cf_atomic_int_decr(&g_read_reqs_queued);
    }

    return (0);
}
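//------------------------------------------------
// NOTE (sketch): align_4096() is referenced above but not defined in
// this section. A minimal sketch, assuming it simply rounds the address
// up to the next 4 KiB boundary - which is why the stack buffer above
// is over-allocated by 4096 bytes:
//
static inline uint8_t* align_4096(uint8_t* stack_buffer)
{
    return (uint8_t*)(((uint64_t)stack_buffer + 4095) & ~4095ULL);
}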
//------------------------------------------------
// Discover device's minimum direct IO op size.
//
static uint64_t discover_min_op_bytes(int fd, const char *name)
{
    off_t off = lseek(fd, 0, SEEK_SET);

    if (off != 0) {
        printf("=> ERROR: %s seek\n", name);
        return 0;
    }

    uint8_t *buf = cf_valloc(HI_IO_MIN_SIZE);

    if (! buf) {
        printf("=> ERROR: IO min size buffer cf_valloc()\n");
        return 0;
    }

    size_t read_sz = LO_IO_MIN_SIZE;

    while (read_sz <= HI_IO_MIN_SIZE) {
        if (read(fd, (void*)buf, read_sz) == (ssize_t)read_sz) {
            free(buf);
            return read_sz;
        }

        read_sz <<= 1; // LO_IO_MIN_SIZE and HI_IO_MIN_SIZE are powers of 2
    }

    printf("=> ERROR: %s read failed at all sizes from %u to %u bytes\n",
            name, LO_IO_MIN_SIZE, HI_IO_MIN_SIZE);
    free(buf);

    return 0;
}
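//------------------------------------------------
// NOTE (sketch): a hypothetical caller for the probe above. The device
// path and helper name are examples only. The fd must be opened with
// O_DIRECT (needs _GNU_SOURCE plus <fcntl.h>, <unistd.h>, <inttypes.h>),
// since it's the failure of under-sized reads under direct IO that makes
// the doubling probe stop at the right size.
//
static bool report_min_op_bytes(const char* name)
{
    int fd = open(name, O_DIRECT | O_RDONLY);

    if (fd == -1) {
        printf("=> ERROR: couldn't open %s\n", name);
        return false;
    }

    uint64_t min_op_bytes = discover_min_op_bytes(fd, name);

    close(fd);

    if (min_op_bytes == 0) {
        return false;
    }

    printf("%s min direct IO op size: %" PRIu64 " bytes\n", name,
            min_op_bytes);

    return true;
}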
//------------------------------------------------
// Write a message into a sub-sector, exposed to Java via JNA.
//
bool writeJNA(uint64_t division, char* message, uint32_t write_size)
{
    void *p_buffer = cf_valloc(g_device->read_bytes);

    if (! p_buffer) {
        printf("=> ERROR: write buffer cf_valloc()\n");
        return false;
    }

    uint64_t column = division % g_ref_tab_columns;
    uint64_t offset = division / g_ref_tab_columns;

    offset = (offset % g_device->num_read_offsets) * g_device->min_op_bytes;

    if (is_sector_free(offset / g_device->read_bytes, column)) {
        prep_to_sector_div(offset, column, p_buffer, message, write_size);

        if (! write_to_device(g_device, offset, g_device->read_bytes,
                p_buffer)) {
            printf("=> ERROR write op on offset: %" PRIu64 "\n", offset);
            free(p_buffer);
            return false;
        }

        add_sector_ref(offset / g_device->read_bytes, column);
    }
    else {
        printf("=> Sector ALREADY referenced!\n");
    }

    free(p_buffer);

    return true;
}
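//------------------------------------------------
// NOTE (worked example, all values assumed): with g_ref_tab_columns == 8,
// g_device->min_op_bytes == 4096 and g_device->num_read_offsets == 1000,
// division 8005 maps as:
//
//   column = 8005 % 8             = 5    -> 6th sub-sector of its block
//   block  = 8005 / 8             = 1000
//   offset = (1000 % 1000) * 4096 = 0    -> wraps back to byte offset 0
//
// i.e. successive divisions walk the sub-sectors of one block before
// moving to the next block, wrapping around the device's read offsets.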
//------------------------------------------------
// Create device large block read buffer.
//
static bool create_large_block_read_buffer(device* p_device)
{
    if (! (p_device->p_large_block_read_buffer =
            cf_valloc(g_large_block_ops_bytes))) {
        fprintf(stdout, "ERROR: large block read buffer cf_valloc()\n");
        return false;
    }

    return true;
}
//------------------------------------------------
// Read a message from a sub-sector, exposed to Java via JNA.
// Returns a buffer the caller owns, or NULL on failure.
//
char* readJNA(uint64_t division, uint32_t read_size)
{
    int sector_div = g_device->read_bytes / g_ref_tab_columns;
    char* message = cf_valloc(sector_div);

    if (! message) {
        printf("=> ERROR: message buffer cf_valloc()\n");
        return NULL;
    }

    void* p_buffer = cf_valloc(g_device->read_bytes);

    if (! p_buffer) {
        printf("=> ERROR: read buffer cf_valloc()\n");
        free(message);
        return NULL;
    }

    uint64_t column = division % g_ref_tab_columns;
    uint64_t offset = division / g_ref_tab_columns;

    offset = (offset % g_device->num_read_offsets) * g_device->min_op_bytes;

    if (is_sector_free(offset / g_device->read_bytes, column)) {
        printf("=> Sector NOT referenced!\n");
        free(p_buffer);
        free(message);
        return NULL;
    }

    if (! read_from_device(g_device, offset, g_device->read_bytes,
            p_buffer)) {
        printf("=> ERROR read op on offset: %" PRIu64 "\n", offset);
        free(p_buffer);
        free(message);
        return NULL;
    }

    // Zero the whole message buffer (not sizeof a pointer) so the copied
    // sub-sector is always NUL-terminated.
    memset(message, '\0', sector_div);

    char* p_sub_sector = (char*)p_buffer + (sector_div * column);

    if (read_size > 0 && read_size < (uint32_t)sector_div) {
        strncpy(message, p_sub_sector, read_size);
    }
    else if (read_size >= (uint32_t)sector_div) {
        strncpy(message, p_sub_sector, sector_div - 1);
    }

    printf("Message = %s\n", message);

    free(p_buffer);

    // Don't free message here - the caller owns it now. Freeing it before
    // returning would hand back dangling memory.
    return message;
}
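//------------------------------------------------
// NOTE (sketch): readJNA() returns an allocation the caller owns. A
// hypothetical native-side round trip:
//
//   if (writeJNA(42, "hello", 5)) {
//       char* msg = readJNA(42, 5);
//
//       if (msg) {
//           // ... use msg ...
//           free(msg); // caller owns the returned buffer
//       }
//   }
//
// When the caller is Java via JNA, the returned char* is copied into a
// Java String, so the native allocation still has to be released (e.g.
// through a small native free wrapper), or each read leaks one buffer.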
//------------------------------------------------
// Create large block write buffers.
//
static bool create_salters()
{
    if (! g_num_write_buffers) {
        if (! (g_salters[0].p_buffer = cf_valloc(g_large_block_ops_bytes))) {
            fprintf(stdout, "ERROR: large block write buffer cf_valloc()\n");
            return false;
        }

        memset(g_salters[0].p_buffer, 0, g_large_block_ops_bytes);
        g_num_write_buffers = 1;

        return true;
    }

    uint8_t seed_buffer[RAND_SEED_SIZE];

    if (! rand_seed(seed_buffer)) {
        return false;
    }

    for (uint32_t n = 0; n < g_num_write_buffers; n++) {
        if (! (g_salters[n].p_buffer = cf_valloc(g_large_block_ops_bytes))) {
            fprintf(stdout, "ERROR: large block write buffer cf_valloc()\n");
            return false;
        }

        if (! rand_fill(g_salters[n].p_buffer, g_large_block_ops_bytes)) {
            return false;
        }

        if (g_num_write_buffers > 1) {
            pthread_mutex_init(&g_salters[n].lock, NULL);
        }
    }

    return true;
}
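//------------------------------------------------
// NOTE (sketch): rand_seed() and rand_fill() aren't defined in this
// section. A plausible sketch, assuming /dev/urandom for seed entropy
// and OpenSSL's RAND_seed()/RAND_bytes() for the salt fill (assumes
// <fcntl.h>, <unistd.h> and <openssl/rand.h> are in scope; the real
// helpers may differ):
//
static bool rand_seed(uint8_t* p_buffer)
{
    int fd = open("/dev/urandom", O_RDONLY);

    if (fd == -1) {
        fprintf(stdout, "ERROR: can't open /dev/urandom\n");
        return false;
    }

    ssize_t read_result = read(fd, p_buffer, RAND_SEED_SIZE);

    close(fd);

    if (read_result != (ssize_t)RAND_SEED_SIZE) {
        fprintf(stdout, "ERROR: can't read /dev/urandom\n");
        return false;
    }

    RAND_seed(p_buffer, RAND_SEED_SIZE);

    return true;
}

static bool rand_fill(uint8_t* p_buffer, uint32_t size)
{
    if (RAND_bytes(p_buffer, (int)size) != 1) {
        fprintf(stdout, "ERROR: RAND_bytes() failed\n");
        return false;
    }

    return true;
}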
static void* generate_async_reads(void* aio_context)
{
    uint64_t count = 0;

    while (g_running) {
        // Get a free info struct to carry context to the process_read end.
        uintptr_t info_ptr;

        if (cf_queue_pop(async_info_queue, (void*)&info_ptr,
                CF_QUEUE_NOWAIT) != CF_QUEUE_OK) {
            fprintf(stdout, "Error: Could not pop info struct\n");
            return (void*)-1;
        }

        as_async_info_t* info = (as_async_info_t*)info_ptr;

        memset(info, 0, sizeof(as_async_info_t));

        // Generate the actual read request.
        uint32_t random_device_index = rand_32() % g_num_devices;
        device* p_random_device = &g_devices[random_device_index];
        readreq* p_readreq = &info->p_readreq;

        p_readreq->p_device = p_random_device;
        p_readreq->offset = random_read_offset(p_random_device);
        p_readreq->size = g_read_req_num_512_blocks * MIN_BLOCK_BYTES;
        p_readreq->start_time = cf_getms();

        // Submit the async read.
        if (g_use_valloc) {
            uint8_t* p_buffer = cf_valloc(p_readreq->size);

            info->p_buffer = p_buffer;

            if (p_buffer) {
                info->raw_start_time = cf_getms();

                if (read_async_from_device(info,
                        *(aio_context_t*)aio_context) < 0) {
                    fprintf(stdout, "Error: Async read failed\n");
                    free(p_buffer);
                    goto fail;
                }
            }
            else {
                fprintf(stdout, "ERROR: read buffer cf_valloc()\n");
            }
        }
        else {
            // CAUTION: this stack buffer goes out of scope at the end of
            // the iteration, while the async read may still be in flight -
            // the valloc path above is the safe one.
            uint8_t stack_buffer[p_readreq->size + 4096];
            uint8_t* p_buffer = align_4096(stack_buffer);

            info->p_buffer = p_buffer;
            info->raw_start_time = cf_getms();

            if (read_async_from_device(info,
                    *(aio_context_t*)aio_context) < 0) {
                fprintf(stdout, "Error: Async read failed\n");
                goto fail;
            }
        }

        if (cf_atomic_int_incr(&g_read_reqs_queued) > MAX_READ_REQS_QUEUED) {
            fprintf(stdout, "ERROR: too many read reqs queued\n");
            fprintf(stdout, "drive(s) can't keep up - test stopped\n");
            g_running = false;
            return (void*)-1;
        }

        count++;

        // Pace submissions to g_read_reqs_per_sec.
        int sleep_ms = (int)
                (((count * 1000) / g_read_reqs_per_sec) -
                        (cf_getms() - g_run_start_ms));

        if (sleep_ms > 0) {
            usleep((uint32_t)sleep_ms * 1000);
        }

        continue;

    // Rollback on failure: return the info struct to the free queue.
    fail:
        if (info) {
            uintptr_t temp = (uintptr_t)info;

            cf_queue_push(async_info_queue, (void*)&temp);
        }
    }

    return (0);
}
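//------------------------------------------------
// NOTE (sketch): the completion side isn't shown in this section. A
// minimal sketch of a matching reaper thread, assuming Linux native AIO
// (io_getevents via syscall; <linux/aio_abi.h>, <sys/syscall.h>,
// <time.h>, <unistd.h>), that read_async_from_device() stashed the info
// pointer in the iocb's aio_data field, and that process_read() -
// referenced in the comments above - reports the durations, frees any
// valloc'd buffer and recycles the info struct:
//
static void* reap_async_reads(void* pv_aio_context)
{
    aio_context_t aio_context = *(aio_context_t*)pv_aio_context;
    struct io_event events[64];

    while (g_running) {
        struct timespec timeout = { 0, 100 * 1000 * 1000 }; // 100 ms

        int n = (int)syscall(SYS_io_getevents, aio_context, 1L, 64L,
                events, &timeout);

        for (int i = 0; i < n; i++) {
            as_async_info_t* info =
                    (as_async_info_t*)(uintptr_t)events[i].data;

            process_read(info);
            cf_atomic_int_decr(&g_read_reqs_queued);
        }
    }

    return (0);
}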