/* * Validate object alignment cache behavior for caches */ static int splat_kmem_test7(struct file *file, void *arg) { char *name = SPLAT_KMEM_TEST7_NAME; int max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2; int i, rc; for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) { uint32_t size; get_random_bytes((void *)&size, sizeof (uint32_t)); size = MAX(size % (max_size + 1), 32); rc = splat_kmem_cache_test(file, arg, name, size, i, 0); if (rc) return rc; rc = splat_kmem_cache_test(file, arg, name, size, i, KMC_OFFSLAB); if (rc) return rc; } return rc; }
/*
 * Validate large object cache behavior for dynamic/kmem/vmem caches.
 *
 * Exercises one large-object cache per backing type: a 256K object in
 * the default (dynamic) cache, a 64K object in a kmem-backed cache and
 * a 1M object in a vmem-backed cache.  Returns 0 on success or the
 * first non-zero error from splat_kmem_cache_test().
 */
static int
splat_kmem_test6(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST6_NAME;
	static const struct {
		int obj_size;
		int cache_flags;
	} variants[] = {
		{ 256 * 1024,	0 },		/* dynamic (default) */
		{ 64 * 1024,	KMC_KMEM },	/* kmem backed */
		{ 1024 * 1024,	KMC_VMEM },	/* vmem backed */
	};
	int v, rc = 0;

	for (v = 0; v < (int)(sizeof (variants) / sizeof (variants[0])); v++) {
		rc = splat_kmem_cache_test(file, arg, name,
		    variants[v].obj_size, 0, variants[v].cache_flags);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Validate object alignment cache behavior for caches.
 *
 * Walks every power-of-two alignment from SPL_KMEM_CACHE_ALIGN up to
 * PAGE_SIZE, creating a cache of 157-byte objects (a deliberately odd
 * size) at each alignment, both on-slab and off-slab (KMC_OFFSLAB).
 *
 * Returns 0 on success or the first non-zero error code from
 * splat_kmem_cache_test().
 */
static int
splat_kmem_test7(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST7_NAME;
	/*
	 * rc must be initialized: if the loop body never executes
	 * (SPL_KMEM_CACHE_ALIGN > PAGE_SIZE on an unusual config) the
	 * trailing return would otherwise read an uninitialized value.
	 */
	int i, rc = 0;

	for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
		/* On-slab variant. */
		rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
		if (rc)
			return rc;

		/* Off-slab variant. */
		rc = splat_kmem_cache_test(file, arg, name, 157, i,
		    KMC_OFFSLAB);
		if (rc)
			return rc;
	}

	return rc;
}
/*
 * Validate large object cache behavior for dynamic/kmem/vmem caches.
 *
 * Exercises each backing type (default/dynamic, kmem, vmem) with a
 * large object, first on-slab and then again off-slab (KMC_OFFSLAB).
 * Returns 0 on success or the first non-zero error from
 * splat_kmem_cache_test().
 */
static int
splat_kmem_test6(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST6_NAME;
	static const struct {
		int obj_size;
		int cache_flags;
	} variants[] = {
		/* On slab (default + kmem + vmem) */
		{ 256 * 1024,	0 },
		{ 64 * 1024,	KMC_KMEM },
		{ 1024 * 1024,	KMC_VMEM },
		/* Off slab (default + kmem + vmem) */
		{ 256 * 1024,	KMC_OFFSLAB },
		{ 64 * 1024,	KMC_KMEM | KMC_OFFSLAB },
		{ 1024 * 1024,	KMC_VMEM | KMC_OFFSLAB },
	};
	int v, rc = 0;

	for (v = 0; v < (int)(sizeof (variants) / sizeof (variants[0])); v++) {
		rc = splat_kmem_cache_test(file, arg, name,
		    variants[v].obj_size, 0, variants[v].cache_flags);
		if (rc)
			break;
	}

	return rc;
}
/* * Validate large object cache behavior for dynamic/kmem/vmem caches */ static int splat_kmem_test6(struct file *file, void *arg) { char *name = SPLAT_KMEM_TEST6_NAME; int i, max_size, rc = 0; /* Randomly pick large object sizes and alignments. */ for (i = 0; i < 100; i++) { int size, align, flags = 0; uint32_t rnd; /* Evenly distribute tests over all value cache types */ get_random_bytes((void *)&rnd, sizeof (uint32_t)); switch (rnd & 0x03) { default: case 0x00: flags = 0; max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2; break; case 0x01: flags = KMC_KMEM; max_size = (SPL_MAX_ORDER_NR_PAGES - 2) * PAGE_SIZE; break; case 0x02: flags = KMC_VMEM; max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2; break; case 0x03: flags = KMC_SLAB; max_size = SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE; break; } /* The following flags are set with a 1/10 chance */ flags |= ((((rnd >> 8) % 10) == 0) ? KMC_OFFSLAB : 0); flags |= ((((rnd >> 16) % 10) == 0) ? KMC_NOEMERGENCY : 0); /* PAGE_SIZE - max_size */ get_random_bytes((void *)&rnd, sizeof (uint32_t)); size = MAX(rnd % (max_size + 1), PAGE_SIZE), /* 2^N where (3 <= N <= PAGE_SHIFT) */ get_random_bytes((void *)&rnd, sizeof (uint32_t)); align = (1 << MAX(3, rnd % (PAGE_SHIFT + 1))); rc = splat_kmem_cache_test(file, arg, name, size, align, flags); if (rc) return (rc); } return (rc); }