void Table::put_over(Block *a, Block *b)
{
        Block *la, *lb;

        la = last_block(a);
        lb = last_block(b);
        desenlazar(a, la);      /* "desenlazar" = unlink: detach the chain a..la from its pile */
        enlazar(lb, a, la);     /* "enlazar" = link: splice a..la on top of lb */
}
/*
 * Return the next free block in the map.
 */
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, unsigned long long *b)
{
        unsigned long long block, min_bs = td->o.rw_min_bs, lastb;
        int i;

        lastb = last_block(td, f, ddir);
        if (!lastb)
                return 1;

        i = f->last_free_lookup;
        block = i * BLOCKS_PER_MAP;
        while (block * min_bs < f->real_file_size &&
               block * min_bs < f->io_size) {
                if (f->file_map[i] != -1UL) {
                        block += ffz(f->file_map[i]);
                        if (block > lastb)
                                break;
                        f->last_free_lookup = i;
                        *b = block;
                        return 0;
                }

                block += BLOCKS_PER_MAP;
                i++;
        }

        dprint(FD_IO, "failed finding a free block\n");
        return 1;
}
/*
 * Return the next free block in the map.
 */
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, unsigned long long *b)
{
        unsigned long long min_bs = td->o.rw_min_bs, lastb;
        int i;

        lastb = last_block(td, f, ddir);
        if (!lastb)
                return 1;

        i = f->last_free_lookup;
        *b = (i * BLOCKS_PER_MAP);
        while ((*b) * min_bs < f->real_file_size &&
               (*b) * min_bs < f->io_size) {
                if (f->file_map[i] != (unsigned int) -1) {
                        *b += ffz(f->file_map[i]);
                        if (*b > lastb)
                                break;
                        f->last_free_lookup = i;
                        return 0;
                }

                *b += BLOCKS_PER_MAP;
                i++;
        }

        dprint(FD_IO, "failed finding a free block\n");
        return 1;
}
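Both map-based variants above do the same scan: advance one word at a time through the per-file bitmap and use ffz() (find first zero) to pick the free bit inside the first word that is not all ones. Below is a minimal self-contained sketch of that scan; the helper names and the ffz built on GCC's __builtin_ctzl are illustrative assumptions, not fio's actual code.

#include <stdio.h>

#define BLOCKS_PER_MAP  (8 * sizeof(unsigned long))

/* Find-first-zero: index of the lowest clear bit in word.
 * Caller must guarantee word != ~0UL (ctzl(0) is undefined). */
static unsigned long ffz(unsigned long word)
{
        return __builtin_ctzl(~word);
}

/* Set *b to the first free block at or after the word given by *hint
 * and return 0; return 1 if every scanned block is in use. */
static int next_free_block(const unsigned long *map, unsigned long nr_words,
                           unsigned long *hint, unsigned long long *b)
{
        unsigned long i = *hint;
        unsigned long long block = (unsigned long long) i * BLOCKS_PER_MAP;

        while (i < nr_words) {
                if (map[i] != ~0UL) {   /* at least one zero bit here */
                        *hint = i;      /* remember where we found space */
                        *b = block + ffz(map[i]);
                        return 0;
                }
                block += BLOCKS_PER_MAP;
                i++;
        }
        return 1;
}

int main(void)
{
        unsigned long map[2] = { ~0UL, ~0UL << 3 }; /* word 0 full; word 1 has bits 0-2 free */
        unsigned long hint = 0;
        unsigned long long b;

        if (!next_free_block(map, 2, &hint, &b))
                printf("first free block: %llu\n", b); /* 64 where unsigned long is 64 bits */
        return 0;
}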
void Table::retornar(Block *b)
{
        Block *pos_dest;

        pos_dest = b + total_blocks;            /* the destination slot sits total_blocks entries past b */
        pos_dest = last_block(pos_dest);
        desenlazar(b);                          /* unlink b from its current pile */
        enlazar(pos_dest, b);                   /* link it back onto its destination ("retornar" = return) */
}
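For readers who don't know this codebase: desenlazar/enlazar are Spanish for unlink/link, so put_over detaches the chain a..last_block(a) and splices it on top of pile b, while retornar sends a single block back to its destination. A hypothetical self-contained sketch of that unlink/splice pattern; the real Block and Table layouts are not shown in this excerpt, so the struct below is an assumption.

#include <assert.h>
#include <stddef.h>

struct block {
        struct block *prev;     /* block underneath */
        struct block *next;     /* block on top */
};

/* Walk to the topmost block of the pile starting at b. */
static struct block *last_block(struct block *b)
{
        while (b->next)
                b = b->next;
        return b;
}

/* Detach the segment a..la from its pile (plays the role of desenlazar). */
static void unlink_segment(struct block *a, struct block *la)
{
        assert(la->next == NULL);       /* la is already the top of the segment */
        if (a->prev)
                a->prev->next = NULL;
        a->prev = NULL;
}

/* Splice segment a..la on top of lb (plays the role of enlazar). */
static void link_segment(struct block *lb, struct block *a, struct block *la)
{
        (void) la;
        lb->next = a;
        a->prev = lb;
}

int main(void)
{
        struct block x = {0}, y = {0}, z = {0};

        x.next = &y;            /* pile 1: x with y on top */
        y.prev = &x;            /* pile 2: just z */

        /* put_over(&x, &z): move pile x..y on top of pile z */
        struct block *la = last_block(&x);
        struct block *lb = last_block(&z);
        unlink_segment(&x, la);
        link_segment(lb, &x, la);

        assert(z.next == &x && last_block(&z) == &y);
        return 0;
}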
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
                                  enum fio_ddir ddir, uint64_t *b)
{
        uint64_t r, lastb;

        lastb = last_block(td, f, ddir);
        if (!lastb)
                return 1;

        if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
                uint64_t rmax;

                if (td->o.use_os_rand) {
                        rmax = OS_RAND_MAX;
                        r = os_random_long(&td->random_state);
                } else {
                        rmax = FRAND_MAX;
                        r = __rand(&td->__random_state);
                }

                dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

                *b = (lastb - 1) * (r / ((uint64_t) rmax + 1.0));
        } else {
                uint64_t off = 0;

                if (lfsr_next(&f->lfsr, &off, lastb))
                        return 1;

                *b = off;
        }

        /*
         * if we are not maintaining a random map, we are done.
         */
        if (!file_randommap(td, f))
                goto ret;

        /*
         * calculate map offset and check if it's free
         */
        if (random_map_free(f, *b))
                goto ret;

        dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
               (unsigned long long) *b);

        *b = axmap_next_free(f->io_axmap, *b);
        if (*b == (uint64_t) -1ULL)
                return 1;
ret:
        return 0;
}
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
                                enum fio_ddir ddir, unsigned long long *b)
{
        unsigned long long r, lastb;
        int loops = 5;

        lastb = last_block(td, f, ddir);
        if (!lastb)
                return 1;

        do {
                r = os_random_long(&td->random_state);
                dprint(FD_RANDOM, "off rand %llu\n", r);
                *b = (lastb - 1) * (r / ((unsigned long long) OS_RAND_MAX + 1.0));

                /*
                 * if we are not maintaining a random map, we are done.
                 */
                if (!file_randommap(td, f))
                        return 0;

                /*
                 * calculate map offset and check if it's free
                 */
                if (random_map_free(f, *b))
                        return 0;

                dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
                       *b);
        } while (--loops);

        /*
         * If we get here, we didn't succeed in looking up a block. Generate
         * a random start offset into the filemap, and find the first free
         * block from there.
         */
        loops = 10;
        do {
                f->last_free_lookup = (f->num_maps - 1) *
                                        (r / (OS_RAND_MAX + 1.0));
                if (!get_next_free_block(td, f, ddir, b))
                        return 0;

                r = os_random_long(&td->random_state);
        } while (--loops);

        /*
         * That didn't work either, so try an exhaustive search from the start.
         */
        f->last_free_lookup = 0;
        return get_next_free_block(td, f, ddir, b);
}
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
                                  enum fio_ddir ddir, uint64_t *b)
{
        uint64_t r;

        if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
            td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) {
                uint64_t frand_max, lastb;

                lastb = last_block(td, f, ddir);
                if (!lastb)
                        return 1;

                frand_max = rand_max(&td->random_state);
                r = __rand(&td->random_state);

                dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

                *b = lastb * (r / ((uint64_t) frand_max + 1.0));
        } else {
                uint64_t off = 0;

                assert(fio_file_lfsr(f));

                if (lfsr_next(&f->lfsr, &off))
                        return 1;

                *b = off;
        }

        /*
         * if we are not maintaining a random map, we are done.
         */
        if (!file_randommap(td, f))
                goto ret;

        /*
         * calculate map offset and check if it's free
         */
        if (random_map_free(f, *b))
                goto ret;

        dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
               (unsigned long long) *b);

        *b = axmap_next_free(f->io_axmap, *b);
        if (*b == (uint64_t) -1ULL)
                return 1;
ret:
        return 0;
}
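All the generator paths above reduce a raw random value r in [0, rmax] to a block index with the same expression, b = lastb * (r / (rmax + 1.0)): dividing by rmax + 1 keeps the fraction strictly below 1.0, so the index stays below lastb (the older variants scale by lastb - 1, which just yields a slightly narrower range). A standalone demonstration using the C library's rand() in place of fio's generators:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Map a raw random value r in [0, rmax] to a block index in [0, lastb). */
static unsigned long long scale_to_block(unsigned long long r,
                                         unsigned long long rmax,
                                         unsigned long long lastb)
{
        /* r / (rmax + 1.0) lies in [0, 1), so the product lies in [0, lastb). */
        return (unsigned long long) (lastb * (r / ((double) rmax + 1.0)));
}

int main(void)
{
        unsigned long long lastb = 1000;

        for (int i = 0; i < 5; i++) {
                unsigned long long b = scale_to_block(rand(), RAND_MAX, lastb);

                assert(b < lastb);      /* never reaches lastb itself */
                printf("block %llu\n", b);
        }
        return 0;
}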
/*
 * malloc
 */
void *malloc(size_t size)
{
        unsigned int awords;            /* adjusted block size in words */
        unsigned int ewords;            /* amount to extend heap by if no fit is found */
        uint32_t *block;
        uint32_t *heap_lastp = last_block();

        checkheap(1);                   /* let's make sure the heap is ok! */

        if (VERBOSE)
                printf("Malloc %d bytes\n", (int) size);

        /* Ignore 0 requests */
        if (size == 0)
                return NULL;

        /* Adjust size to include alignment and convert to multiples of 4 bytes */
        if (size <= DSIZE)
                awords = 2;
        else
                awords = (((size) + (DSIZE - 1)) & ~0x7) / WSIZE;

        /* Search the free list for a fit */
        if ((block = find_fit(awords)) != NULL) {
                place(block, awords);
                return block_mem(block);
        }

        /* No fit found. Get more memory and place the block. */
        if (awords > CHUNKSIZE)
                ewords = awords;
        else
                ewords = CHUNKSIZE;

        if (block_free(heap_lastp)) {
                /* The last block is free: only request the difference. */
                ENSURES(block_size(heap_lastp) < ewords);
                ewords = ewords - block_size(heap_lastp) + 2;
        } else {
                ewords += 2;    /* ask for 2 more words for the header and footer */
        }

        if ((block = extend_heap(ewords)) == NULL)
                return NULL;

        place(block, awords);
        return block_mem(block);
}
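The size adjustment above rounds a request up to a double-word boundary and converts bytes to words. A standalone check of that arithmetic, assuming WSIZE = 4 and DSIZE = 8 as in the usual CS:APP-style allocator (the actual constants are defined elsewhere in this codebase):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define WSIZE 4         /* word size in bytes (assumed) */
#define DSIZE 8         /* double-word size in bytes (assumed) */

/* Round size up to a multiple of DSIZE, then convert bytes to words. */
static unsigned int adjusted_words(size_t size)
{
        if (size <= DSIZE)
                return 2;
        return ((size + (DSIZE - 1)) & ~0x7) / WSIZE;
}

int main(void)
{
        assert(adjusted_words(1) == 2);         /* minimum payload: 8 bytes = 2 words */
        assert(adjusted_words(9) == 4);         /* 9 -> 16 bytes -> 4 words */
        assert(adjusted_words(16) == 4);        /* already aligned */
        assert(adjusted_words(17) == 6);        /* 17 -> 24 bytes -> 6 words */
        printf("size adjustment checks pass\n");
        return 0;
}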
static int get_off_from_method(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, uint64_t *b)
{
        if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) {
                uint64_t lastb;

                lastb = last_block(td, f, ddir);
                if (!lastb)
                        return 1;

                return __get_next_rand_offset(td, f, ddir, b, lastb);
        } else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
                return __get_next_rand_offset_zipf(td, f, ddir, b);
        else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
                return __get_next_rand_offset_pareto(td, f, ddir, b);
        else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
                return __get_next_rand_offset_gauss(td, f, ddir, b);
        else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
                return __get_next_rand_offset_zoned(td, f, ddir, b);

        log_err("fio: unknown random distribution: %d\n",
                td->o.random_distribution);
        return 1;
}
static int __get_next_rand_offset_zoned(struct thread_data *td,
                                        struct fio_file *f, enum fio_ddir ddir,
                                        uint64_t *b)
{
        unsigned int v, send, stotal;
        uint64_t offset, lastb;
        static int warned;
        struct zone_split_index *zsi;

        lastb = last_block(td, f, ddir);
        if (!lastb)
                return 1;

        if (!td->o.zone_split_nr[ddir]) {
bail:
                return __get_next_rand_offset(td, f, ddir, b, lastb);
        }

        /*
         * Generate a value, v, between 1 and 100, both inclusive
         */
        v = rand32_between(&td->zone_state, 1, 100);

        zsi = &td->zone_state_index[ddir][v - 1];
        stotal = zsi->size_perc_prev;
        send = zsi->size_perc;

        /*
         * Should never happen
         */
        if (send == -1U) {
                if (!warned) {
                        log_err("fio: bug in zoned generation\n");
                        warned = 1;
                }
                goto bail;
        }

        /*
         * 'send' is some percentage below or equal to 100 that
         * marks the end of the current IO range. 'stotal' marks
         * the start, in percent.
         */
        if (stotal)
                offset = stotal * lastb / 100ULL;
        else
                offset = 0;

        lastb = lastb * (send - stotal) / 100ULL;

        /*
         * Generate an index from 0..(send - stotal) percent of lastb
         */
        if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
                return 1;

        /*
         * Add our start offset, if any
         */
        if (offset)
                *b += offset;

        return 0;
}
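The zoned path rolls v in 1..100, indexes a 100-entry table that maps each percentage point to its zone's cumulative start/end, and confines the random offset to that slice of the block range. A self-contained sketch of the same lookup and range-narrowing arithmetic; the table layout mimics, but is not, fio's actual zone_split_index:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* One entry per percentage point 1..100: each entry records
 * where its zone starts and ends, in cumulative percent. */
struct zone_entry {
        unsigned int start_perc;        /* like size_perc_prev */
        unsigned int end_perc;          /* like size_perc */
};

int main(void)
{
        struct zone_entry table[100];
        unsigned long long lastb = 1000, b;
        unsigned int v;

        /* Two zones: rolls 1..80 map to the first 20% of blocks,
         * rolls 81..100 to the remaining 80% (a hot/cold split). */
        for (int i = 0; i < 100; i++) {
                if (i < 80)
                        table[i] = (struct zone_entry){ 0, 20 };
                else
                        table[i] = (struct zone_entry){ 20, 100 };
        }

        srand(1);
        v = 1 + rand() % 100;           /* roll 1..100, like rand32_between() */

        /* Narrow the block range to the selected zone's slice. */
        unsigned long long offset = table[v - 1].start_perc * lastb / 100ULL;
        unsigned long long span =
                (table[v - 1].end_perc - table[v - 1].start_perc) * lastb / 100ULL;

        b = offset + (unsigned long long) (span * (rand() / (RAND_MAX + 1.0)));
        assert(b < lastb);
        printf("roll %u -> blocks [%llu, %llu), picked %llu\n",
               v, offset, offset + span, b);
        return 0;
}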
/*
 * Set the bsize of the last atom and of all closed blocks, update
 * the fsize and csize fields of the enclosing blocks, and remove
 * the closed blocks from the block queue.
 *
 * The queue contains blocks B_0 ... B_n.
 *
 * If the closed blocks are B_k ... B_n with k > 0:
 * - the bsizes of B_k ... B_n and of the last atom are set.
 * - the csize and fsize fields of B_n are updated based on the
 *   last atom's bsize.
 * - the csize and fsize fields of B_i are updated based on
 *   B_{i+1}'s bsize (for i = n-1 down to k-1).
 *
 * If k = 0 then we also update the head token based on B_0's bsize.
 */
static void set_bsizes_and_close(formatter_t *f)
{
        pp_atomic_token_t *last;
        pp_block_t *b;
        pp_open_token_t *tk;
        uint32_t csize, n;

        // Set bsize of the last atom
        last = f->last_atom;
        csize = 0;
        if (last != NULL) {
                assert(f->atom_col <= f->length);
                csize = f->length - f->atom_col;
                last->bsize = csize;
        }

        /*
         * Set bsize, csize, and fsize of all closed blocks
         * and remove them from the queue.
         */
        assert(f->queue_size >= f->nclosed);
        n = f->nclosed;
        f->queue_size -= n;
        f->nclosed = 0;
        while (n > 0) {
                b = last_block(&f->block_queue);
                tk = b->token;

                // csize is 0 or the bsize of a sub-block or atom of tk
                if (tk->fsize == 0) {
                        // first sub-block closed
                        tk->fsize = csize;
                        tk->csize = csize;
                } else if (tk->csize < csize) {
                        tk->csize = csize;
                }

                // compute the bsize of that block
                assert(b->col <= f->length);
                csize = f->length - b->col;
                tk->bsize = csize;

                pop_last_block(&f->block_queue);
                n--;
        }

        if (csize > 0) {
                /*
                 * Set the csize and fsize of the head block
                 * or of the last (open) block in the queue.
                 */
                if (block_queue_is_empty(&f->block_queue)) {
                        // all blocks are closed:
                        // csize = bsize of block B_0 or of the last atom
                        assert(f->queue_size == 0);
                        tk = f->head_token;
                        if (tk != NULL) {
                                if (tk->fsize == 0) {
                                        tk->fsize = csize;
                                        tk->csize = csize;
                                } else if (tk->csize < csize) {
                                        tk->csize = csize;
                                }
                        }
                } else {
                        // update csize and fsize of the last block in the queue
                        b = last_block(&f->block_queue);
                        tk = b->token;
                        if (tk->fsize == 0) {
                                tk->fsize = csize;
                                tk->csize = csize;
                        } else if (tk->csize < csize) {
                                tk->csize = csize;
                        }
                }
        }
}
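The same three-line update appears three times in the function above: the first closed child of a token fixes fsize (and seeds csize), and every later child can only grow csize. A standalone restatement of that rule, with the token reduced to just the two fields involved (hypothetical names, not the original API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct token_sizes {
        uint32_t fsize;         /* bsize of the first sub-block or atom */
        uint32_t csize;         /* max bsize over all sub-blocks/atoms */
};

/* Record a child's bsize: the first child sets fsize and csize,
 * later children only raise csize. */
static void record_child_bsize(struct token_sizes *tk, uint32_t csize)
{
        if (tk->fsize == 0) {           /* first sub-block closed */
                tk->fsize = csize;
                tk->csize = csize;
        } else if (tk->csize < csize) {
                tk->csize = csize;
        }
}

int main(void)
{
        struct token_sizes tk = { 0, 0 };

        record_child_bsize(&tk, 4);     /* first child: fsize = csize = 4 */
        record_child_bsize(&tk, 7);     /* wider child: csize grows to 7 */
        record_child_bsize(&tk, 3);     /* narrower child: no change */
        assert(tk.fsize == 4 && tk.csize == 7);
        printf("fsize=%u csize=%u\n", tk.fsize, tk.csize);
        return 0;
}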