/*
 * Compute a 32-bit checksum of an L1 table.
 *
 * The table is byte-swapped to big endian in place, hashed with MD5,
 * and then swapped back to native endianness, so the buffer at @ptr
 * is unchanged when the function returns.  The checksum is the first
 * four bytes of the MD5 digest, copied as-is (no endian conversion).
 *
 * @ptr: buffer holding the L1 table (treated as an array of uint64_t;
 *       trailing bytes beyond a multiple of sizeof(uint64_t) are hashed
 *       but not byte-swapped)
 * @len: buffer length in bytes; assumed non-negative — TODO confirm at
 *       the call sites
 *
 * Returns the checksum, or 0 if MD5() fails.
 */
static uint32_t gen_cksum(char *ptr, int len)
{
    size_t i;
    size_t n = len / sizeof(uint64_t);
    /*
     * The digest is small and of fixed size; keep it on the stack
     * instead of heap-allocating it, which also removes the spurious
     * "return 0 because malloc failed" path of the old code.
     */
    unsigned char md[MD5_DIGEST_LENGTH];
    uint32_t ret = 0;

    /* Convert L1 table to big endian */
    for (i = 0; i < n; i++) {
        cpu_to_be64s(&((uint64_t *)ptr)[i]);
    }

    /* Generate checksum; MD5() returns its output buffer on success */
    if (MD5((unsigned char *)ptr, len, md) == md) {
        memcpy(&ret, md, sizeof(uint32_t));
    }

    /* Convert L1 table back to native endianness */
    for (i = 0; i < n; i++) {
        be64_to_cpus(&((uint64_t *)ptr)[i]);
    }

    return ret;
}
/*
 * Byte-swap every on-disk field of a qcow2 bitmap directory entry from
 * native to big-endian representation, in place.
 */
static inline void bitmap_dir_entry_to_be(Qcow2BitmapDirEntry *entry)
{
    /* Each swap is independent; the order matches the struct layout. */
    cpu_to_be64s(&entry->bitmap_table_offset);
    cpu_to_be32s(&entry->bitmap_table_size);
    cpu_to_be32s(&entry->flags);
    cpu_to_be16s(&entry->name_size);
    cpu_to_be32s(&entry->extra_data_size);
}
/*
 * Convert each of the @size 64-bit entries of @bitmap_table from native
 * to big-endian representation, in place.
 */
static inline void bitmap_table_to_be(uint64_t *bitmap_table, size_t size)
{
    uint64_t *entry = bitmap_table;
    uint64_t *end = bitmap_table + size;

    while (entry < end) {
        cpu_to_be64s(entry);
        entry++;
    }
}
/*
 * Compute a 32-bit checksum of an L1 table using libgcrypt's MD5.
 *
 * The table is byte-swapped to big endian in place, hashed, and then
 * swapped back to native endianness, so the buffer at @ptr is unchanged
 * on return.  The checksum is the first 32 bits of the MD5 digest,
 * returned as stored in memory (no endian conversion).
 *
 * @ptr: buffer holding the L1 table (treated as an array of uint64_t)
 * @len: buffer length in bytes
 */
static uint32_t gen_cksum(char *ptr, int len)
{
    uint64_t *table = (uint64_t *)ptr;
    size_t count = len / sizeof(uint64_t);
    size_t i;
    uint32_t md[4]; /* 16-byte MD5 digest viewed as four words */

    /* Convert L1 table to big endian */
    for (i = 0; i < count; i++) {
        cpu_to_be64s(&table[i]);
    }

    /* Generate checksum */
    gcry_md_hash_buffer(GCRY_MD_MD5, md, ptr, len);

    /* Convert L1 table back to native endianness */
    for (i = 0; i < count; i++) {
        be64_to_cpus(&table[i]);
    }

    return md[0];
}
/*
 * Prepare the next random I/O request for tester @r.
 *
 * Chooses READ or WRITE with equal probability, picks a sector range
 * that does not conflict with any ongoing request (per check_conflict()),
 * fills r->test_buf with tagged random data for writes, and splits the
 * buffer into a random number of iovecs registered via
 * qemu_iovec_init_external().
 *
 * NOTE(review): relies on the file-scope globals `parallel`, `testers`,
 * `total_sectors`, `io_size` and `max_iov`; all are presumed positive
 * and initialized before this is called — confirm in the setup code.
 */
static void prepare_read_write (RandomIO * r)
{
    /* Do a READ or WRITE? */
    if (random () % 2) {
        r->type = OP_READ;
    } else {
        r->type = OP_WRITE;
    }

    /* Find the next region to perform io. */
    do {
        if (parallel <= 1 || (random () % 2 == 0)) {
            /* Perform a random I/O. */
            r->sector_num = rand64 () % total_sectors;
        } else {
            /* Perform an I/O next to a currently ongoing I/O. */
            /* Pick any other tester (retry until id != our own index). */
            int id;
            do {
                id = random () % parallel;
            } while (id == r->tester);
            RandomIO *p = &testers[id];
            /* Offset within +/- 2*io_size of the other tester's request,
             * then clamp into [0, total_sectors).  Assumes sector_num is
             * a signed type so the `< 0` check is meaningful — TODO
             * confirm against the RandomIO declaration. */
            r->sector_num = p->sector_num + 2 * io_size - rand64 () % (4 * io_size);
            if (r->sector_num < 0) {
                r->sector_num = 0;
            } else if (r->sector_num >= total_sectors) {
                r->sector_num = total_sectors - 1;
            }
        }
        /* Request length: 1..io_size sectors, truncated at end of disk. */
        r->nb_sectors = 1 + rand64 () % io_size;
        if (r->sector_num + r->nb_sectors > total_sectors) {
            r->nb_sectors = total_sectors - r->sector_num;
        }
    } while (check_conflict (r));

    if (r->type == OP_WRITE) {
        /* Fill test_buf with random data. */
        int i, j;
        for (i = 0; i < r->nb_sectors; i++) {
            const uint64_t TEST_MAGIC = 0x0123456789ABCDEFULL;
            /* This first 8 bytes of the sector stores the current testing
             * round. The next 8 bytes store a magic number. This info helps
             * debugging. */
            uint64_t *p = (uint64_t *) & r->test_buf[i * 512];
            *p = r->uuid;
            cpu_to_be64s (p);
            p++;
            *p = TEST_MAGIC;
            cpu_to_be64s (p);
            /* The rest of the sector are filled with random data. */
            uint32_t *q = (uint32_t *) (p + 1);
            int n = (512 - 2 * sizeof (uint64_t)) / sizeof (uint32_t);
            for (j = 0; j < n; j++) {
                *q++ = random ();
            }
        }
    }

    /* Determine the number of iov: carve the buffer into randomly sized
     * chunks of whole sectors; the last slot takes whatever remains so
     * we never exceed max_iov entries. */
    int niov = 0;
    uint8_t *p = r->test_buf;
    int left = r->nb_sectors;
    do {
        if (niov == max_iov - 1) {
            /* Last allowed slot: put all remaining sectors here. */
            r->qiov.iov[niov].iov_len = left * 512;
            r->qiov.iov[niov].iov_base = p;
            niov++;
            break;
        }
        int nb = 1 + random () % left;
        r->qiov.iov[niov].iov_len = nb * 512;
        r->qiov.iov[niov].iov_base = p;
        p += r->qiov.iov[niov].iov_len;
        left -= nb;
        niov++;
    } while (left > 0);
    qemu_iovec_init_external (&r->qiov, r->qiov.iov, niov);
}