int
fio_batch_write(struct fio_batch *batch, int fd)
{
	ssize_t bytes_written = fio_writev(fd, batch->iov, batch->iovcnt);
	if (bytes_written <= 0)
		return 0;

	if (bytes_written == batch->bytes)
		return batch->rows; /* returns the number of written rows */

	say_warn("fio_batch_write, [%s]: partial write,"
		 " wrote %jd out of %jd bytes", fio_filename(fd),
		 (intmax_t) bytes_written, (intmax_t) batch->bytes);

	/* Iterate over the end-of-row flags */
	struct bit_iterator bit_it;
	bit_iterator_init(&bit_it, batch->rowflag,
			  batch->max_iov / CHAR_BIT + 1, 1);
	size_t row_last_iov = bit_iterator_next(&bit_it);

	int good_rows = 0;      /* the number of fully written rows */
	ssize_t good_bytes = 0; /* the number of bytes in fully written rows */
	ssize_t row_bytes = 0;  /* the number of bytes in the current row */
	struct iovec *iov = batch->iov;
	while (iov < batch->iov + batch->iovcnt) {
		if (good_bytes + row_bytes + iov->iov_len > bytes_written)
			break;
		row_bytes += iov->iov_len;
		if ((size_t) (iov - batch->iov) == row_last_iov) {
			/* the end of the current row */
			good_bytes += row_bytes;
			row_bytes = 0;
			good_rows++;
			row_last_iov = bit_iterator_next(&bit_it);
		}
		iov++;
	}

	/*
	 * Unwind the file position to ensure we do not leave
	 * partially written rows.
	 */
	off_t good_offset = fio_lseek(fd, good_bytes - bytes_written, SEEK_CUR);
	/*
	 * The caller may choose to close the file right after
	 * a partial write. Don't take chances and make sure that
	 * there is no garbage at the end of the file if that happens.
	 */
	if (good_offset != -1)
		(void) fio_truncate(fd, good_offset);

	/*
	 * writev() doesn't set errno in case of a partial write.
	 * If nothing else above failed, set errno to EAGAIN.
	 */
	if (!errno)
		errno = EAGAIN;
	return good_rows; /* returns the number of fully written rows */
}
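/*
 * A minimal, self-contained sketch of the row-recovery scan above:
 * given an iovec array, a bitmap whose set bits mark the last iovec
 * of each row, and the number of bytes actually written, count the
 * rows that were written completely. The names and the LSB-first bit
 * numbering are illustrative assumptions, not part of the fio API.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

static int
count_complete_rows(const struct iovec *iov, int iovcnt,
		    const unsigned char *rowflag, ssize_t bytes_written,
		    ssize_t *good_bytes_out)
{
	int good_rows = 0;
	ssize_t good_bytes = 0, row_bytes = 0;
	for (int i = 0; i < iovcnt; i++) {
		if (good_bytes + row_bytes + (ssize_t) iov[i].iov_len >
		    bytes_written)
			break;
		row_bytes += iov[i].iov_len;
		/* rowflag bit i set => iov[i] is the last iovec of a row */
		if (rowflag[i / 8] & (1u << (i % 8))) {
			good_bytes += row_bytes;
			row_bytes = 0;
			good_rows++;
		}
	}
	*good_bytes_out = good_bytes;
	return good_rows;
}

int
main(void)
{
	/* Two rows: row 1 = iov[0..1] (8 bytes), row 2 = iov[2] (4 bytes) */
	struct iovec iov[3] = {
		{ .iov_base = (void *) "AAAA", .iov_len = 4 },
		{ .iov_base = (void *) "BBBB", .iov_len = 4 },
		{ .iov_base = (void *) "CCCC", .iov_len = 4 },
	};
	unsigned char rowflag[1] = { (1u << 1) | (1u << 2) };
	ssize_t good_bytes;
	/* Only 10 of 12 bytes were written: row 2 is incomplete */
	int rows = count_complete_rows(iov, 3, rowflag, 10, &good_bytes);
	printf("%d complete rows, %zd bytes to keep\n", rows, good_bytes);
	/* prints: 1 complete rows, 8 bytes to keep */
	return 0;
}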
static void
bitset_iterator_prepare_page(struct bitset_iterator *it)
{
	qsort(it->conjs, it->size, sizeof(*it->conjs),
	      bitset_iterator_conj_cmp);

	bitset_page_set_zeros(it->page);
	if (it->size > 0)
		it->page->first_pos = it->conjs[0].page_first_pos;
	else
		it->page->first_pos = SIZE_MAX;

	/* There are no more conjunctions that can be ORed */
	if (it->page->first_pos == SIZE_MAX)
		return;

	/* For each conj where conj->page_first_pos == pos */
	for (size_t c = 0; c < it->size; c++) {
		if (it->conjs[c].page_first_pos > it->page->first_pos)
			break;

		/* Get the result from the conjunction */
		bitset_iterator_conj_prepare_page(&it->conjs[c], it->page_tmp);
		/* OR the page from the conjunction into it->page */
		bitset_page_or(it->page, it->page_tmp);
	}

	/* Init the bit iterator on it->page */
	bit_iterator_init(&it->page_it, bitset_page_data(it->page),
			  BITSET_PAGE_DATA_SIZE, true);
}
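/*
 * A minimal sketch of the page merge above, assuming a page is just a
 * fixed-size array of words (the real bitset_page layout is richer).
 * The conjunction array must be sorted by first_pos, as the qsort call
 * above guarantees; every page that starts at the minimal position is
 * ORed into the output, the rest are left for later pages. All names
 * here are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

enum { PAGE_WORDS = 4 }; /* illustrative page size */

struct conj_page {
	size_t first_pos;          /* position where this page starts */
	uint64_t data[PAGE_WORDS]; /* the page bits */
};

/* OR src into dst, word by word, like bitset_page_or */
static void
page_or(uint64_t *dst, const uint64_t *src)
{
	for (size_t i = 0; i < PAGE_WORDS; i++)
		dst[i] |= src[i];
}

/* Returns the first_pos of the merged page, or SIZE_MAX if none */
static size_t
merge_min_pages(uint64_t out[PAGE_WORDS], const struct conj_page *conjs,
		size_t n)
{
	memset(out, 0, PAGE_WORDS * sizeof(out[0]));
	if (n == 0)
		return SIZE_MAX;
	size_t min_pos = conjs[0].first_pos;
	if (min_pos == SIZE_MAX) /* every conjunction is exhausted */
		return SIZE_MAX;
	for (size_t c = 0; c < n && conjs[c].first_pos == min_pos; c++)
		page_or(out, conjs[c].data);
	return min_pos;
}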
/**
 * Init the byte iterator
 */
static int
bitmask_it_init(struct PagePool *pp)
{
	/* Allocate the iterator lazily, on first use */
	if (!pp->it)
		pp->it = (struct bit_iterator *) malloc(sizeof(struct bit_iterator));
	check_mem(pp->it, sizeof(struct bit_iterator));
	/* The final 'false' selects the zero bits of the bitmask */
	bit_iterator_init(pp->it, (const void *) pp->bitmask,
			  pp->page_size * pp->nPages, false);
	return 0;
error:
	/* reached via check_mem() on allocation failure */
	exit(-1);
}
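/*
 * A hypothetical caller sketch (pagepool_first_unset is not part of
 * the original code): after bitmask_it_init(), bit_iterator_next()
 * yields the positions of the zero bits in the pool's bitmask, since
 * the iterator was initialized with 'false'. SIZE_MAX signals that no
 * zero bit is left.
 */
static size_t
pagepool_first_unset(struct PagePool *pp)
{
	if (bitmask_it_init(pp) != 0)
		return SIZE_MAX;
	return bit_iterator_next(pp->it);
}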
int
bitset_index_expr_any_set(struct bitset_expr *expr, const void *key,
			  size_t key_size)
{
	bitset_expr_clear(expr);

	struct bit_iterator bit_it;
	bit_iterator_init(&bit_it, key, key_size, true);

	size_t pos;
	while ((pos = bit_iterator_next(&bit_it)) != SIZE_MAX) {
		size_t b = pos + 1;

		if (bitset_expr_add_conj(expr) != 0)
			return -1;
		if (bitset_expr_add_param(expr, b, false) != 0)
			return -1;
	}
	return 0;
}
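/*
 * A small standalone illustration of the expression shape: for each
 * set bit at position pos in the key, the function above adds a
 * single-parameter conjunction over bitset pos + 1 (bitset 0 is
 * reserved for the 'flag' bitset, see bitset_index_insert below).
 * This demo only prints the bitset numbers and assumes the LSB-first
 * bit order used by bit_iterator.
 */
#include <stdio.h>

int
main(void)
{
	unsigned char key = 0x05; /* set bits at positions 0 and 2 */
	for (unsigned pos = 0; pos < 8; pos++) {
		if (key & (1u << pos))
			printf("OR conjunction over bitset %u\n", pos + 1);
	}
	/* prints bitsets 1 and 3: the resulting expression is
	 * roughly bitsets[1] OR bitsets[3] */
	return 0;
}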
int
bitset_index_insert(struct bitset_index *index, const void *key,
		    size_t key_size, size_t value)
{
	assert(index != NULL);
	assert(key != NULL);
	assert(index->capacity > 0);

	/*
	 * Step 0: allocate a sufficient number of bitsets.
	 *
	 * bitset_index_reserve could fail on realloc and return -1.
	 * Do not change anything and return the error to the caller.
	 */
	const size_t size = 1 + key_size * CHAR_BIT;
	if (bitset_index_reserve(index, size) != 0)
		return -1;

	/*
	 * Step 1: set the 'flag' bitset.
	 *
	 * bitset_set for the 'flag' bitset could fail on realloc.
	 * Do not change anything. Do not shrink the buffers allocated
	 * on step 0.
	 */
	int rc = bitset_set(index->bitsets[0], value);
	if (rc < 0)
		return -1;
	/* rc == 0 means the value is new in the index */
	index->rollback_buf[0] = (char) rc;

	/*
	 * Step 2: iterate over the 'set' bits in the key and update
	 * the corresponding bitsets.
	 *
	 * A bitset_set somewhere in the middle could also fail on
	 * realloc. If this happens, stop processing and jump to the
	 * rollback code. The rollback uses the index->rollback_buf
	 * buffer to restore the previous values of all bitsets at the
	 * given position. Remember that bitset_set returns 1 if the
	 * previous value was 'true' and 0 if it was 'false'. The buffer
	 * is indexed by bytes (char *) instead of bits (bit_set)
	 * because it is a little bit faster here.
	 */
	struct bit_iterator bit_it;
	bit_iterator_init(&bit_it, key, key_size, true);
	size_t pos = 0;
	while ((pos = bit_iterator_next(&bit_it)) != SIZE_MAX) {
		size_t b = pos + 1;

		rc = bitset_set(index->bitsets[b], value);
		if (rc < 0)
			goto rollback;
		index->rollback_buf[b] = (char) rc;
	}

	/* Finish here if the value is new in the index */
	if (index->rollback_buf[0] == 0)
		return 0;

	/*
	 * Step 3: iterate over the 'unset' bits and clean up the other
	 * bitsets. This step is needed if the value already existed in
	 * the index. Nothing can fail here because the current
	 * implementation of bitset_clear never fails.
	 */
	bit_iterator_init(&bit_it, key, key_size, false);
	while ((pos = bit_iterator_next(&bit_it)) != SIZE_MAX) {
		size_t b = pos + 1;

		rc = bitset_clear(index->bitsets[b], value);
		assert(rc >= 0); /* bitset_clear never fails */
	}

	return 0;

rollback:
	/*
	 * Rollback the changes done by Step 2: iterate over the same
	 * key_size bytes of the key as in Step 2.
	 */
	bit_iterator_init(&bit_it, key, key_size, true);
	size_t rpos;
	while ((rpos = bit_iterator_next(&bit_it)) != SIZE_MAX &&
	       rpos < pos) {
		size_t b = rpos + 1;

		if (index->rollback_buf[b] == 1)
			bitset_set(index->bitsets[b], value);
		else
			bitset_clear(index->bitsets[b], value);
	}

	/* Rollback the changes done by Step 1 */
	if (index->rollback_buf[0] == 1)
		bitset_set(index->bitsets[0], value);
	else
		bitset_clear(index->bitsets[0], value);

	return -1;
}
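/*
 * A hypothetical usage sketch (index_insert_u32 is not part of the
 * original code): after a successful insert the value is a member of
 * bitsets[b] exactly for the set bits b - 1 of the key, plus the
 * 'flag' bitset bitsets[0]; on -1 every step has been rolled back, so
 * the caller can simply propagate the error. Passing the native
 * in-memory representation of the integer as the key bytes is an
 * assumption about the calling convention.
 */
#include <stdint.h>

static int
index_insert_u32(struct bitset_index *index, uint32_t key, size_t value)
{
	return bitset_index_insert(index, &key, sizeof(key), value);
}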