static void
splat_taskq_test2_func2(void *arg)
{
	splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;

	ASSERT(tq_arg);
	splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST2_NAME,
	    "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
	    tq_arg->name, tq_arg->id,
	    sym2str(splat_taskq_test2_func2),
	    tq_arg->flag + 1, tq_arg->flag);
	tq_arg->flag += 1;
}
static int
splat_cred_test1(struct file *file, void *arg)
{
	char str[GROUP_STR_SIZE];
	uid_t uid, ruid, suid;
	gid_t gid, rgid, sgid, *groups;
	int ngroups, i, count = 0;

	uid  = crgetuid(CRED());
	ruid = crgetruid(CRED());
	suid = crgetsuid(CRED());

	gid  = crgetgid(CRED());
	rgid = crgetrgid(CRED());
	sgid = crgetsgid(CRED());

	crhold(CRED());
	ngroups = crgetngroups(CRED());
	groups  = crgetgroups(CRED());

	memset(str, 0, GROUP_STR_SIZE);

	for (i = 0; i < ngroups; i++) {
		count += sprintf(str + count, "%d ", groups[i]);

		if (count > (GROUP_STR_SIZE - GROUP_STR_REDZONE)) {
			splat_vprint(file, SPLAT_CRED_TEST1_NAME,
				     "Failed too many group entries for temp "
				     "buffer: %d, %s\n", ngroups, str);
			/* Drop the hold taken above before bailing out */
			crfree(CRED());
			return -ENOSPC;
		}
	}

	crfree(CRED());

	splat_vprint(file, SPLAT_CRED_TEST1_NAME,
		     "uid: %d ruid: %d suid: %d "
		     "gid: %d rgid: %d sgid: %d\n",
		     uid, ruid, suid, gid, rgid, sgid);
	splat_vprint(file, SPLAT_CRED_TEST1_NAME,
		     "ngroups: %d groups: %s\n", ngroups, str);

	if (uid || ruid || suid || gid || rgid || sgid) {
		splat_vprint(file, SPLAT_CRED_TEST1_NAME,
			     "Failed expected all uids+gids to be %d\n", 0);
		return -EIDRM;
	}

	if (ngroups > NGROUPS_MAX) {
		splat_vprint(file, SPLAT_CRED_TEST1_NAME,
			     "Failed ngroups must not exceed NGROUPS_MAX: "
			     "%d > %d\n", ngroups, NGROUPS_MAX);
		return -EIDRM;
	}

	splat_vprint(file, SPLAT_CRED_TEST1_NAME,
		     "Success sane CRED(): %d\n", 0);

	return 0;
} /* splat_cred_test1() */
static int
splat_zlib_test1_check(struct file *file, void *src, void *dst, void *chk,
    int level)
{
	size_t dst_len = BUFFER_SIZE;
	size_t chk_len = BUFFER_SIZE;
	int rc;

	memset(dst, 0, BUFFER_SIZE);
	memset(chk, 0, BUFFER_SIZE);

	rc = z_compress_level(dst, &dst_len, src, BUFFER_SIZE, level);
	if (rc != Z_OK) {
		splat_vprint(file, SPLAT_ZLIB_TEST1_NAME,
		    "Failed level %d z_compress_level(), %d\n", level, rc);
		return -EINVAL;
	}

	rc = z_uncompress(chk, &chk_len, dst, dst_len);
	if (rc != Z_OK) {
		splat_vprint(file, SPLAT_ZLIB_TEST1_NAME,
		    "Failed level %d z_uncompress(), %d\n", level, rc);
		return -EINVAL;
	}

	rc = memcmp(src, chk, BUFFER_SIZE);
	if (rc) {
		splat_vprint(file, SPLAT_ZLIB_TEST1_NAME,
		    "Failed level %d memcmp(), %d\n", level, rc);
		return -EINVAL;
	}

	splat_vprint(file, SPLAT_ZLIB_TEST1_NAME,
	    "Passed level %d, compressed %d bytes to %d bytes\n",
	    level, BUFFER_SIZE, (int)dst_len);

	return 0;
}
static int
splat_rwlock_test5(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	int rc = -EINVAL;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	rw_enter(&rwp->rw_rwlock, RW_WRITER);
	if (!RW_WRITE_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
			     "rwlock should be write lock: %d\n",
			     RW_WRITE_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rw_downgrade(&rwp->rw_rwlock);
	if (!RW_READ_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
			     "rwlock should be read lock: %d\n",
			     RW_READ_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "%s",
		     "rwlock properly downgraded\n");
out:
	rw_exit(&rwp->rw_rwlock);
	rw_destroy(&rwp->rw_rwlock);
	kfree(rwp);

	return rc;
}
static int
splat_thread_test2(struct file *file, void *arg)
{
	thread_priv_t tp;
	kthread_t *thr;
	int rc = 0;

	tp.tp_magic = SPLAT_THREAD_TEST_MAGIC;
	tp.tp_file = file;
	spin_lock_init(&tp.tp_lock);
	init_waitqueue_head(&tp.tp_waitq);
	tp.tp_rc = 0;

	thr = (kthread_t *)thread_create(NULL, 0, splat_thread_work2, &tp, 0,
					 &p0, TS_RUN, defclsyspri);
	/* Must never fail under Solaris, but we check anyway since this
	 * can happen in the Linux SPL; we may want to change this behavior. */
	if (thr == NULL)
		return -ESRCH;

	/* Sleep until the thread sets tp.tp_rc == 1 */
	wait_event(tp.tp_waitq, splat_thread_rc(&tp, 1));

	/* Sleep until the thread sets tp.tp_rc == 2, or until we hit
	 * the timeout.  If thread exit is working properly we should
	 * hit the timeout and never see tp.tp_rc == 2. */
	rc = wait_event_timeout(tp.tp_waitq, splat_thread_rc(&tp, 2), HZ / 10);
	if (rc > 0) {
		rc = -EINVAL;
		splat_vprint(file, SPLAT_THREAD_TEST2_NAME, "%s",
			     "Thread did not exit properly at thread_exit()\n");
	} else {
		splat_vprint(file, SPLAT_THREAD_TEST2_NAME, "%s",
			     "Thread successfully exited at thread_exit()\n");
	}

	return rc;
}
int
splat_condvar_test12_thread(void *arg)
{
	condvar_thr_t *ct = (condvar_thr_t *)arg;
	condvar_priv_t *cv = ct->ct_cvp;

	ASSERT(cv->cv_magic == SPLAT_CONDVAR_TEST_MAGIC);

	mutex_enter(&cv->cv_mtx);
	splat_vprint(cv->cv_file, ct->ct_name,
	    "%s thread sleeping with %d waiters\n",
	    ct->ct_thread->comm, atomic_read(&cv->cv_condvar.cv_waiters));
	cv_wait(&cv->cv_condvar, &cv->cv_mtx);
	splat_vprint(cv->cv_file, ct->ct_name,
	    "%s thread woken %d waiters remain\n",
	    ct->ct_thread->comm, atomic_read(&cv->cv_condvar.cv_waiters));
	mutex_exit(&cv->cv_mtx);

	/* Wait for the main thread to reap us */
	while (!kthread_should_stop())
		schedule();

	return 0;
}
static void
splat_taskq_test7_func(void *arg)
{
	splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
	taskqid_t id;

	ASSERT(tq_arg);

	if (tq_arg->depth >= SPLAT_TASKQ_DEPTH_MAX)
		return;

	tq_arg->depth++;

	splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
	    "Taskq '%s' function '%s' dispatching (depth = %u)\n",
	    tq_arg->name, sym2str(splat_taskq_test7_func),
	    tq_arg->depth);

	if (tq_arg->tqe) {
		VERIFY(taskq_empty_ent(tq_arg->tqe));
		taskq_dispatch_ent(tq_arg->tq, splat_taskq_test7_func,
		    tq_arg, TQ_SLEEP, tq_arg->tqe);
		id = tq_arg->tqe->tqent_id;
	} else {
		id = taskq_dispatch(tq_arg->tq, splat_taskq_test7_func,
		    tq_arg, TQ_SLEEP);
	}

	if (id == 0) {
		splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
		    "Taskq '%s' function '%s' dispatch failed "
		    "(depth = %u)\n", tq_arg->name,
		    sym2str(splat_taskq_test7_func), tq_arg->depth);
		tq_arg->flag = -EINVAL;
		return;
	}
}
static int
splat_taskq_test1_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t tqe;

	taskq_init_ent(&tqe);

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
	    "Taskq '%s' creating (%s dispatch)\n",
	    SPLAT_TASKQ_TEST1_NAME,
	    prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST1_NAME, 1, maxclsyspri,
	    50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST1_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	tq_arg.id = 0;
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST1_NAME;

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
	    "Taskq '%s' function '%s' dispatching\n",
	    tq_arg.name, sym2str(splat_taskq_test13_func));
	if (prealloc) {
		taskq_dispatch_ent(tq, splat_taskq_test13_func,
		    &tq_arg, TQ_SLEEP, &tqe);
		id = tqe.tqent_id;
	} else {
		id = taskq_dispatch(tq, splat_taskq_test13_func,
		    &tq_arg, TQ_SLEEP);
	}

	if (id == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		    "Taskq '%s' function '%s' dispatch failed\n",
		    tq_arg.name, sym2str(splat_taskq_test13_func));
		taskq_destroy(tq);
		return -EINVAL;
	}

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
	    tq_arg.name);
	taskq_wait(tq);
	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
	    tq_arg.name);
	taskq_destroy(tq);

	return (tq_arg.flag) ? 0 : -EINVAL;
}
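/*
 * A minimal sketch (not part of the original test suite) of the
 * create/dispatch/wait/destroy lifecycle exercised above, using only the
 * taskq interfaces already seen in this file.  The callback and its
 * completion-flag argument are hypothetical.
 */
static void
example_task(void *arg)
{
	int *done = (int *)arg;		/* hypothetical completion flag */
	*done = 1;
}

static int
example_taskq_usage(void)
{
	taskq_t *tq;
	int done = 0;

	/* One worker thread; 50..INT_MAX pre-populated task entries. */
	tq = taskq_create("example", 1, maxclsyspri, 50, INT_MAX,
	    TASKQ_PREPOPULATE);
	if (tq == NULL)
		return -EINVAL;

	/* A taskqid of 0 indicates the dispatch failed. */
	if (taskq_dispatch(tq, example_task, &done, TQ_SLEEP) == 0) {
		taskq_destroy(tq);
		return -EINVAL;
	}

	taskq_wait(tq);		/* Block until all dispatched tasks finish. */
	taskq_destroy(tq);

	return done ? 0 : -EINVAL;
}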
/*
 * Attempt to shrink the icache memory.  This is simply a functional test
 * to ensure we can correctly call the shrinker.  We don't check that the
 * cache actually decreased because we have no control over what else may
 * be running on the system.  This avoids false positives.
 */
static int
splat_linux_test2(struct file *file, void *arg)
{
	int remain_before;
	int remain_after;

	remain_before = shrink_icache_memory(0, GFP_KERNEL);
	remain_after = shrink_icache_memory(KMC_REAP_CHUNK, GFP_KERNEL);

	splat_vprint(file, SPLAT_LINUX_TEST2_NAME,
	    "Shrink icache memory, remain %d -> %d\n",
	    remain_before, remain_after);

	return 0;
}
static int
splat_list_test6(struct file *file, void *arg)
{
	list_t list;
	list_item_t *li, *li_prev;
	int i, list_size = 8, rc = 0;

	splat_vprint(file, SPLAT_LIST_TEST6_NAME, "Creating list\n%s", "");
	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));

	/* Insert all items at the list tail to form a queue */
	splat_vprint(file, SPLAT_LIST_TEST6_NAME,
		     "Adding %d items to list tail\n", list_size);
	for (i = 0; i < list_size; i++) {
		li = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
		if (li == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		list_link_init(&li->li_node);
		li->li_data = i;
		list_insert_tail(&list, li);
	}

	/* Remove all odd items from the queue */
	splat_vprint(file, SPLAT_LIST_TEST6_NAME,
		     "Removing %d odd items from the list\n", list_size >> 1);
	for (li = list_head(&list); li != NULL; li = list_next(&list, li)) {
		if (li->li_data % 2 == 1) {
			li_prev = list_prev(&list, li);
			list_remove(&list, li);
			kmem_free(li, sizeof(list_item_t));
			li = li_prev;
		}
	}

	splat_vprint(file, SPLAT_LIST_TEST6_NAME, "Validating %d item "
		     "list is a queue of only even elements\n", list_size / 2);
	rc = splat_list_validate(&list, list_size / 2, LIST_ORDER_QUEUE, 2);
	if (rc)
		splat_vprint(file, SPLAT_LIST_TEST6_NAME,
			     "List validation failed, %d\n", rc);
out:
	/* Remove all items */
	splat_vprint(file, SPLAT_LIST_TEST6_NAME,
		     "Removing %d items from list tail\n", list_size / 2);
	while ((li = list_remove_tail(&list)))
		kmem_free(li, sizeof(list_item_t));

	splat_vprint(file, SPLAT_LIST_TEST6_NAME, "Destroying list\n%s", "");
	list_destroy(&list);

	return rc;
}
static int
splat_linux_drop_slab(struct file *file)
{
	char *argv[] = { "/bin/sh",
			 "-c",
			 DROP_SLAB_CMD,
			 NULL };
	char *envp[] = { "HOME=/",
			 "TERM=linux",
			 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 NULL };
	int rc;

	rc = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	if (rc)
		splat_vprint(file, SPLAT_LINUX_TEST1_NAME,
		    "Failed user helper '%s %s %s', rc = %d\n",
		    argv[0], argv[1], argv[2], rc);

	return rc;
}
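/*
 * DROP_SLAB_CMD is defined elsewhere in this module.  For illustration
 * only, an assumed definition is shown below (guarded out so it cannot
 * clash with the real one): a small shell snippet asking the kernel to
 * reclaim reclaimable slab objects via /proc/sys/vm/drop_caches.
 */
#if 0	/* assumed definition, for illustration only */
#define DROP_SLAB_CMD "echo 2 > /proc/sys/vm/drop_caches"
#endif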
static void
splat_kmem_cache_test_debug(struct file *file, char *name,
    kmem_cache_priv_t *kcp)
{
	int j;

	splat_vprint(file, name, "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
	    kcp->kcp_cache->skc_name, kcp->kcp_count,
	    (unsigned)kcp->kcp_cache->skc_slab_alloc,
	    (unsigned)kcp->kcp_cache->skc_slab_total,
	    (unsigned)kcp->kcp_cache->skc_obj_alloc,
	    (unsigned)kcp->kcp_cache->skc_obj_total);

	for_each_online_cpu(j)
		splat_print(file, "%u/%u ",
		    kcp->kcp_cache->skc_mag[j]->skm_avail,
		    kcp->kcp_cache->skc_mag[j]->skm_size);

	splat_print(file, "%s\n", "");
}
static int
splat_list_test5(struct file *file, void *arg)
{
	list_t list;
	list_item_t *li_new, *li_last = NULL;
	int i, list_size = 8, rc = 0;

	splat_vprint(file, SPLAT_LIST_TEST5_NAME, "Creating list\n%s", "");
	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));

	/* Insert all items before the last item to form a stack */
	splat_vprint(file, SPLAT_LIST_TEST5_NAME,
		     "Adding %d items each before the last item\n", list_size);
	for (i = 0; i < list_size; i++) {
		li_new = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
		if (li_new == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		list_link_init(&li_new->li_node);
		li_new->li_data = i;
		list_insert_before(&list, li_last, li_new);
		li_last = li_new;
	}

	splat_vprint(file, SPLAT_LIST_TEST5_NAME,
		     "Validating %d item list is a stack\n", list_size);
	rc = splat_list_validate(&list, list_size, LIST_ORDER_STACK, 1);
	if (rc)
		splat_vprint(file, SPLAT_LIST_TEST5_NAME,
			     "List validation failed, %d\n", rc);
out:
	/* Remove all items */
	splat_vprint(file, SPLAT_LIST_TEST5_NAME,
		     "Removing %d items from list tail\n", list_size);
	while ((li_new = list_remove_tail(&list)))
		kmem_free(li_new, sizeof(list_item_t));

	splat_vprint(file, SPLAT_LIST_TEST5_NAME, "Destroying list\n%s", "");
	list_destroy(&list);

	return rc;
}
static int
splat_list_test2(struct file *file, void *arg)
{
	list_t list;
	list_item_t *li;
	int i, list_size = 8, rc = 0;

	splat_vprint(file, SPLAT_LIST_TEST2_NAME, "Creating list\n%s", "");
	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));

	/* Insert all items at the list head to form a stack */
	splat_vprint(file, SPLAT_LIST_TEST2_NAME,
		     "Adding %d items to list head\n", list_size);
	for (i = 0; i < list_size; i++) {
		li = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
		if (li == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		list_link_init(&li->li_node);
		li->li_data = i;
		list_insert_head(&list, li);
	}

	splat_vprint(file, SPLAT_LIST_TEST2_NAME,
		     "Validating %d item list is a stack\n", list_size);
	rc = splat_list_validate(&list, list_size, LIST_ORDER_STACK, 1);
	if (rc)
		splat_vprint(file, SPLAT_LIST_TEST2_NAME,
			     "List validation failed, %d\n", rc);
out:
	/* Remove all items */
	splat_vprint(file, SPLAT_LIST_TEST2_NAME,
		     "Removing %d items from list head\n", list_size);
	while ((li = list_remove_head(&list)))
		kmem_free(li, sizeof(list_item_t));

	splat_vprint(file, SPLAT_LIST_TEST2_NAME, "Destroying list\n%s", "");
	list_destroy(&list);

	return rc;
}
static int
splat_taskq_test2_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq[TEST2_TASKQS] = { NULL };
	taskqid_t id;
	splat_taskq_arg_t tq_args[TEST2_TASKQS];
	taskq_ent_t *func1_tqes = NULL;
	taskq_ent_t *func2_tqes = NULL;
	int i, rc = 0;

	func1_tqes = kmalloc(sizeof(*func1_tqes) * TEST2_TASKQS, GFP_KERNEL);
	if (func1_tqes == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	func2_tqes = kmalloc(sizeof(*func2_tqes) * TEST2_TASKQS, GFP_KERNEL);
	if (func2_tqes == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < TEST2_TASKQS; i++) {
		taskq_init_ent(&func1_tqes[i]);
		taskq_init_ent(&func2_tqes[i]);

		splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
		    "Taskq '%s/%d' creating (%s dispatch)\n",
		    SPLAT_TASKQ_TEST2_NAME, i,
		    prealloc ? "prealloc" : "dynamic");
		if ((tq[i] = taskq_create(SPLAT_TASKQ_TEST2_NAME,
		    TEST2_THREADS_PER_TASKQ, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE)) == NULL) {
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
			    "Taskq '%s/%d' create failed\n",
			    SPLAT_TASKQ_TEST2_NAME, i);
			rc = -EINVAL;
			break;
		}

		tq_args[i].flag = i;
		tq_args[i].id = i;
		tq_args[i].file = file;
		tq_args[i].name = SPLAT_TASKQ_TEST2_NAME;

		splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
		    "Taskq '%s/%d' function '%s' dispatching\n",
		    tq_args[i].name, tq_args[i].id,
		    sym2str(splat_taskq_test2_func1));
		if (prealloc) {
			taskq_dispatch_ent(tq[i], splat_taskq_test2_func1,
			    &tq_args[i], TQ_SLEEP, &func1_tqes[i]);
			id = func1_tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq[i], splat_taskq_test2_func1,
			    &tq_args[i], TQ_SLEEP);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
			    "Taskq '%s/%d' function '%s' dispatch "
			    "failed\n", tq_args[i].name, tq_args[i].id,
			    sym2str(splat_taskq_test2_func1));
			rc = -EINVAL;
			break;
		}

		splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
		    "Taskq '%s/%d' function '%s' dispatching\n",
		    tq_args[i].name, tq_args[i].id,
		    sym2str(splat_taskq_test2_func2));
		if (prealloc) {
			taskq_dispatch_ent(tq[i], splat_taskq_test2_func2,
			    &tq_args[i], TQ_SLEEP, &func2_tqes[i]);
			id = func2_tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq[i], splat_taskq_test2_func2,
			    &tq_args[i], TQ_SLEEP);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME, "Taskq "
			    "'%s/%d' function '%s' dispatch failed\n",
			    tq_args[i].name, tq_args[i].id,
			    sym2str(splat_taskq_test2_func2));
			rc = -EINVAL;
			break;
		}
	}

	/* When rc is set we're effectively just doing cleanup here, so
	 * ignore new errors in that case.  They just cause noise. */
	for (i = 0; i < TEST2_TASKQS; i++) {
		if (tq[i] != NULL) {
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
			    "Taskq '%s/%d' waiting\n",
			    tq_args[i].name, tq_args[i].id);
			taskq_wait(tq[i]);
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
			    "Taskq '%s/%d' destroying\n",
			    tq_args[i].name, tq_args[i].id);

			taskq_destroy(tq[i]);

			if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
				splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
				    "Taskq '%s/%d' processed tasks "
				    "out of order; %d != %d\n",
				    tq_args[i].name, tq_args[i].id,
				    tq_args[i].flag, i * 2 + 1);
				rc = -EINVAL;
			} else {
				splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
				    "Taskq '%s/%d' processed tasks "
				    "in the correct order; %d == %d\n",
				    tq_args[i].name, tq_args[i].id,
				    tq_args[i].flag, i * 2 + 1);
			}
		}
	}
out:
	if (func1_tqes)
		kfree(func1_tqes);

	if (func2_tqes)
		kfree(func2_tqes);

	return rc;
}
/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocate a known buffer size from vmem space.  We can
 * then check that the vmem_size() values were updated properly within
 * a fairly small tolerance.  The tolerance is important because we are
 * not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
	size_t alloc1, free1, total1;
	size_t alloc2, free2, total2;
	int size = 8*1024*1024;
	void *ptr;

	alloc1 = vmem_size(NULL, VMEM_ALLOC);
	free1  = vmem_size(NULL, VMEM_FREE);
	total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
	    "free=%lu total=%lu\n", (unsigned long)alloc1,
	    (unsigned long)free1, (unsigned long)total1);

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
	ptr = vmem_alloc(size, KM_SLEEP);
	if (!ptr) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
		    "Failed to alloc %d bytes\n", size);
		return -ENOMEM;
	}

	alloc2 = vmem_size(NULL, VMEM_ALLOC);
	free2  = vmem_size(NULL, VMEM_FREE);
	total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
	    "free=%lu total=%lu\n", (unsigned long)alloc2,
	    (unsigned long)free2, (unsigned long)total2);

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
	vmem_free(ptr, size);

	/* Allocated space expected to increase by size, +/- 1% */
	if (alloc2 < (alloc1 + size - (size / 100)) ||
	    alloc2 > (alloc1 + size + (size / 100))) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
		    "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
		    (unsigned long)alloc2, (unsigned long)alloc1, size);
		return -ERANGE;
	}

	/* Free space expected to decrease by size, +/- 1% */
	if (free2 < (free1 - size - (size / 100)) ||
	    free2 > (free1 - size + (size / 100))) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
		    "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
		    (unsigned long)free2, (unsigned long)free1, size);
		return -ERANGE;
	}

	/* Total space must never change */
	if (total1 != total2) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
		    "VMEM_ALLOC | VMEM_FREE not constant: "
		    "%lu != %lu\n", (unsigned long)total2,
		    (unsigned long)total1);
		return -ERANGE;
	}

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
	    "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
	    (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
	    (long)abs(alloc1 + (long)size - alloc2), size);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
	    "VMEM_FREE within tolerance: ~%ld%% (%ld/%d)\n",
	    (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
	    (long)abs((free1 - (long)size) - free2), size);

	return 0;
}
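/*
 * A minimal sketch (not from the original source) of the +/- 1% window
 * checked above.  For the 8 MiB allocation used in this test, size / 100
 * is 83886 bytes, so the observed value may legitimately drift that far
 * in either direction while other vmem consumers run concurrently.  The
 * helper below is hypothetical and simply mirrors the bounds used in
 * splat_kmem_test12().
 */
static inline int
example_within_one_percent(size_t observed, size_t expected, size_t size)
{
	/* True when observed lies in [expected - size/100, expected + size/100] */
	return (observed >= expected - (size / 100) &&
	    observed <= expected + (size / 100));
}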
/*
 * Validate kmem_cache_reap() by requesting the slab cache free any objects
 * it can.  For a few reasons this may not immediately result in more free
 * memory even if objects are freed.  First off, due to fragmentation we
 * may not be able to reclaim any slabs.  Secondly, even if we do fully
 * clear some slabs, we will not want to immediately reclaim all of them
 * because we may contend with cache allocations and thrash.  What we want
 * to see is the slab size decrease more gradually as it becomes clear they
 * will not be needed.  This should be achievable in less than a minute.
 * If it takes longer than this, something has gone wrong.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	unsigned int spl_kmem_cache_expire_old;
	int i, rc = 0;

	/* Enable cache aging just for this test if it is disabled */
	spl_kmem_cache_expire_old = spl_kmem_cache_expire;
	spl_kmem_cache_expire = KMC_EXPIRE_AGE;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
	    256, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
		    "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
	    kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
	        splat_kmem_cache_test_constructor,
	        splat_kmem_cache_test_destructor,
	        splat_kmem_cache_test_reclaim,
	        kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
		    "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
		    "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
		    "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	/* Force reclaim every 1/10 of a second for 60 seconds. */
	for (i = 0; i < 600; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_count == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
	}

	if (kcp->kcp_count == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
		    "Successfully created %d objects "
		    "in cache %s and reclaimed them\n",
		    SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
		    "Failed to reclaim %u/%d objects from cache %s\n",
		    (unsigned)kcp->kcp_count,
		    SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	spl_kmem_cache_expire = spl_kmem_cache_expire_old;

	return rc;
}
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
    int size, int align, int flags)
{
	kmem_cache_priv_t *kcp = NULL;
	kmem_cache_data_t **kcd = NULL;
	int i, rc = 0, objs = 0;

	/* Limit size for low memory machines (1/128 of memory) */
	size = MIN(size, (physmem * PAGE_SIZE) >> 7);

	splat_vprint(file, name,
	    "Testing size=%d, align=%d, flags=0x%04x\n",
	    size, align, flags);

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return (-ENOMEM);
	}

	kcp->kcp_cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
	    kcp->kcp_size, kcp->kcp_align,
	    splat_kmem_cache_test_constructor,
	    splat_kmem_cache_test_destructor,
	    NULL, kcp, NULL, flags);
	if (kcp->kcp_cache == NULL) {
		splat_vprint(file, name, "Unable to create "
		    "name='%s', size=%d, align=%d, flags=0x%x\n",
		    SPLAT_KMEM_CACHE_NAME, size, align, flags);
		rc = -ENOMEM;
		goto out_free;
	}

	/*
	 * Allocate several slabs worth of objects to verify functionality.
	 * However, on 32-bit systems with limited address space constrain
	 * it to a single slab for the purposes of this test.
	 */
#ifdef _LP64
	objs = kcp->kcp_cache->skc_slab_objs * 4;
#else
	objs = 1;
#endif
	kcd = kmem_zalloc(sizeof (kmem_cache_data_t *) * objs, KM_SLEEP);
	if (kcd == NULL) {
		splat_vprint(file, name, "Unable to allocate pointers "
		    "for %d objects\n", objs);
		rc = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < objs; i++) {
		kcd[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (kcd[i] == NULL) {
			splat_vprint(file, name, "Unable to allocate "
			    "from '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (!kcd[i]->kcd_flag) {
			splat_vprint(file, name, "Failed to run constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (kcd[i]->kcd_magic != kcp->kcp_magic) {
			splat_vprint(file, name,
			    "Failed to pass private data to constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	for (i = 0; i < objs; i++) {
		kmem_cache_free(kcp->kcp_cache, kcd[i]);

		/* Destructors are run for every kmem_cache_free() */
		if (kcd[i]->kcd_flag) {
			splat_vprint(file, name,
			    "Failed to run destructor for '%s'\n",
			    SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	if (kcp->kcp_count) {
		splat_vprint(file, name,
		    "Failed to run destructor on all slab objects for '%s'\n",
		    SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
	    "Successfully alloc'd/free'd %d objects of size %d\n",
	    objs, size);

	return (rc);

out_free:
	if (kcd) {
		for (i = 0; i < objs; i++) {
			if (kcd[i] != NULL)
				kmem_cache_free(kcp->kcp_cache, kcd[i]);
		}

		kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return (rc);
}
/*
 * Create threads which set and verify SPLAT_THREAD_TEST_KEYS number of
 * keys.  These threads may then exit by calling thread_exit() which calls
 * tsd_exit() resulting in all their thread specific data being reclaimed.
 * Alternately, the thread may block in which case the thread specific
 * data will be reclaimed as part of tsd_destroy().  In either case all
 * thread specific data must be reclaimed; this is verified by ensuring
 * the registered destructor is called the correct number of times.
 */
static int
splat_thread_test3(struct file *file, void *arg)
{
	int i, rc = 0, expected, wait_count = 0, exit_count = 0;
	thread_priv_t tp;

	tp.tp_magic = SPLAT_THREAD_TEST_MAGIC;
	tp.tp_file = file;
	spin_lock_init(&tp.tp_lock);
	init_waitqueue_head(&tp.tp_waitq);
	tp.tp_rc = 0;
	tp.tp_count = 0;
	tp.tp_dtor_count = 0;

	for (i = 0; i < SPLAT_THREAD_TEST_KEYS; i++) {
		tp.tp_keys[i] = 0;
		tsd_create(&tp.tp_keys[i], splat_thread_dtor3);
	}

	/* Start tsd wait threads */
	for (i = 0; i < SPLAT_THREAD_TEST_THREADS; i++) {
		if (thread_create(NULL, 0, splat_thread_work3_wait,
		    &tp, 0, &p0, TS_RUN, defclsyspri))
			wait_count++;
	}

	/* All wait threads have setup their tsd and are blocking. */
	wait_event(tp.tp_waitq, splat_thread_count(&tp, wait_count));

	if (tp.tp_dtor_count != 0) {
		splat_vprint(file, SPLAT_THREAD_TEST3_NAME,
		    "Prematurely ran %d tsd destructors\n", tp.tp_dtor_count);
		if (!rc)
			rc = -ERANGE;
	}

	/* Start tsd exit threads */
	for (i = 0; i < SPLAT_THREAD_TEST_THREADS; i++) {
		if (thread_create(NULL, 0, splat_thread_work3_exit,
		    &tp, 0, &p0, TS_RUN, defclsyspri))
			exit_count++;
	}

	/* All exit threads verified tsd and are in the process of exiting */
	wait_event(tp.tp_waitq,
	    splat_thread_count(&tp, wait_count + exit_count));
	msleep(500);

	expected = (SPLAT_THREAD_TEST_KEYS * exit_count);
	if (tp.tp_dtor_count != expected) {
		splat_vprint(file, SPLAT_THREAD_TEST3_NAME,
		    "Expected %d exit tsd destructors but saw %d\n",
		    expected, tp.tp_dtor_count);
		if (!rc)
			rc = -ERANGE;
	}

	/* Destroy all keys and associated tsd in blocked threads */
	for (i = 0; i < SPLAT_THREAD_TEST_KEYS; i++)
		tsd_destroy(&tp.tp_keys[i]);

	expected = (SPLAT_THREAD_TEST_KEYS * (exit_count + wait_count));
	if (tp.tp_dtor_count != expected) {
		splat_vprint(file, SPLAT_THREAD_TEST3_NAME,
		    "Expected %d wait+exit tsd destructors but saw %d\n",
		    expected, tp.tp_dtor_count);
		if (!rc)
			rc = -ERANGE;
	}

	/* Release the remaining wait threads, sleep briefly while they exit */
	spin_lock(&tp.tp_lock);
	tp.tp_count = 0;
	wake_up_all(&tp.tp_waitq);
	spin_unlock(&tp.tp_lock);
	msleep(500);

	if (tp.tp_rc) {
		splat_vprint(file, SPLAT_THREAD_TEST3_NAME,
		    "Thread tsd_get()/tsd_set() error %d\n", tp.tp_rc);
		if (!rc)
			rc = tp.tp_rc;
	} else if (!rc) {
		splat_vprint(file, SPLAT_THREAD_TEST3_NAME, "%s",
		    "Thread specific data verified\n");
	}

	return rc;
}
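/*
 * A minimal sketch (not part of the test suite) of the TSD lifecycle the
 * test above exercises.  tsd_create()/tsd_destroy() appear in the test,
 * and tsd_get()/tsd_set() are referenced in its log messages; the key
 * variable and destructor below are hypothetical.
 */
static uint_t example_key;		/* hypothetical TSD key */

static void
example_dtor(void *value)
{
	/* Runs once per thread that set a value, at exit or key destroy. */
	kmem_free(value, sizeof (int));
}

static void
example_tsd_usage(void)
{
	int *value;

	tsd_create(&example_key, example_dtor);

	value = kmem_alloc(sizeof (int), KM_SLEEP);
	VERIFY(tsd_set(example_key, value) == 0);
	VERIFY(tsd_get(example_key) == value);

	/* Reclaims every thread's value for this key via example_dtor(). */
	tsd_destroy(&example_key);
}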
static void
splat_atomic_work(void *priv)
{
	atomic_priv_t *ap;
	atomic_op_t op;
	int i;

	ap = (atomic_priv_t *)priv;
	ASSERT(ap->ap_magic == SPLAT_ATOMIC_TEST_MAGIC);

	spin_lock(&ap->ap_lock);
	op = ap->ap_op;
	wake_up(&ap->ap_waitq);
	spin_unlock(&ap->ap_lock);

	splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	    "Thread %d successfully started: %lu/%lu\n", op,
	    (long unsigned)ap->ap_atomic,
	    (long unsigned)ap->ap_atomic_exited);

	for (i = 0; i < SPLAT_ATOMIC_INIT_VALUE / 10; i++) {

		/* Periodically sleep to mix up the ordering */
		if ((i % (SPLAT_ATOMIC_INIT_VALUE / 100)) == 0) {
			splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
			    "Thread %d sleeping: %lu/%lu\n", op,
			    (long unsigned)ap->ap_atomic,
			    (long unsigned)ap->ap_atomic_exited);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 100);
		}

		switch (op) {
		case SPLAT_ATOMIC_INC_64:
			atomic_inc_64(&ap->ap_atomic);
			break;
		case SPLAT_ATOMIC_DEC_64:
			atomic_dec_64(&ap->ap_atomic);
			break;
		case SPLAT_ATOMIC_ADD_64:
			atomic_add_64(&ap->ap_atomic, 3);
			break;
		case SPLAT_ATOMIC_SUB_64:
			atomic_sub_64(&ap->ap_atomic, 3);
			break;
		case SPLAT_ATOMIC_ADD_64_NV:
			atomic_add_64_nv(&ap->ap_atomic, 5);
			break;
		case SPLAT_ATOMIC_SUB_64_NV:
			atomic_sub_64_nv(&ap->ap_atomic, 5);
			break;
		default:
			PANIC("Undefined op %d\n", op);
		}
	}

	atomic_inc_64(&ap->ap_atomic_exited);

	splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	    "Thread %d successfully exited: %lu/%lu\n", op,
	    (long unsigned)ap->ap_atomic,
	    (long unsigned)ap->ap_atomic_exited);

	wake_up(&ap->ap_waitq);
	thread_exit();
}
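/*
 * A worked check (not from the original source) of the arithmetic above:
 * each op runs SPLAT_ATOMIC_INIT_VALUE / 10 iterations, and the ops pair
 * off exactly (inc vs. dec, +3 vs. -3, +5 vs. -5).  So, assuming the
 * caller spawns one thread per op as the "Thread %d" / op numbering
 * suggests, ap_atomic must return to its initial value once
 * ap_atomic_exited reaches the thread count; any other result indicates
 * a lost (non-atomic) update.
 */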
static int
splat_vnode_test4(struct file *file, void *arg)
{
	vnode_t *vp;
	char buf1[32] = "SPL VNode Interface Test File\n";
	char buf2[32] = "";
	int rc;

	if ((rc = splat_vnode_unlink_all(file, arg, SPLAT_VNODE_TEST4_NAME)))
		return rc;

	if ((rc = vn_open(SPLAT_VNODE_TEST_FILE_RW1, UIO_SYSSPACE,
	    FWRITE | FREAD | FCREAT | FEXCL, 0644, &vp, 0, 0))) {
		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
		    "Failed to vn_open test file: %s (%d)\n",
		    SPLAT_VNODE_TEST_FILE_RW1, rc);
		goto out;
	}

	rc = vn_rdwr(UIO_WRITE, vp, buf1, strlen(buf1), 0,
	    UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
	if (rc) {
		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
		    "Failed vn_rdwr write of test file: %s (%d)\n",
		    SPLAT_VNODE_TEST_FILE_RW1, rc);
		goto out2;
	}

	VOP_CLOSE(vp, 0, 0, 0, 0, 0);

	rc = vn_rename(SPLAT_VNODE_TEST_FILE_RW1, SPLAT_VNODE_TEST_FILE_RW2, 0);
	if (rc) {
		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
		    "Failed vn_rename %s -> %s (%d)\n",
		    SPLAT_VNODE_TEST_FILE_RW1,
		    SPLAT_VNODE_TEST_FILE_RW2, rc);
		goto out;
	}

	if ((rc = vn_open(SPLAT_VNODE_TEST_FILE_RW2, UIO_SYSSPACE,
	    FREAD | FEXCL, 0644, &vp, 0, 0))) {
		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
		    "Failed to vn_open test file: %s (%d)\n",
		    SPLAT_VNODE_TEST_FILE_RW2, rc);
		goto out;
	}

	rc = vn_rdwr(UIO_READ, vp, buf2, strlen(buf1), 0,
	    UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
	if (rc) {
		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
		    "Failed vn_rdwr read of test file: %s (%d)\n",
		    SPLAT_VNODE_TEST_FILE_RW2, rc);
		goto out2;
	}

	if (strncmp(buf1, buf2, strlen(buf1))) {
		rc = EINVAL;
		splat_vprint(file, SPLAT_VNODE_TEST4_NAME,
		    "Failed strncmp data written does not match "
		    "data read\nWrote: %sRead: %s\n", buf1, buf2);
		goto out2;
	}

	rc = 0;
	splat_vprint(file, SPLAT_VNODE_TEST4_NAME, "Wrote to %s: %s",
	    SPLAT_VNODE_TEST_FILE_RW1, buf1);
	splat_vprint(file, SPLAT_VNODE_TEST4_NAME, "Read from %s: %s",
	    SPLAT_VNODE_TEST_FILE_RW2, buf2);
	splat_vprint(file, SPLAT_VNODE_TEST4_NAME, "Successfully renamed "
	    "test file %s -> %s and verified data pattern\n",
	    SPLAT_VNODE_TEST_FILE_RW1, SPLAT_VNODE_TEST_FILE_RW2);
out2:
	VOP_CLOSE(vp, 0, 0, 0, 0, 0);
out:
	vn_remove(SPLAT_VNODE_TEST_FILE_RW1, UIO_SYSSPACE, RMFILE);
	vn_remove(SPLAT_VNODE_TEST_FILE_RW2, UIO_SYSSPACE, RMFILE);

	return -rc;
} /* splat_vnode_test4() */
static int
splat_taskq_test6_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order[SPLAT_TASKQ_ORDER_MAX] = { 1, 2, 3, 6, 7, 8, 4, 5 };
	taskq_ent_t tqes[SPLAT_TASKQ_ORDER_MAX];
	int i, rc = 0;
	uint_t tflags;

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
	    "Taskq '%s' creating (%s dispatch)\n",
	    SPLAT_TASKQ_TEST6_NAME,
	    prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST6_NAME, 3, maxclsyspri,
	    50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST6_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST6_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;
		tflags = TQ_SLEEP;
		if (i > 4)
			tflags |= TQ_FRONT;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test6_func,
			    &tq_id[i], tflags, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test6_func,
			    &tq_id[i], tflags);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			    "Taskq '%s' function '%s' dispatch failed\n",
			    tq_arg.name, sym2str(splat_taskq_test6_func));
			rc = -EINVAL;
			goto out;
		}

		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			    "Taskq '%s' expected taskqid %d got %d\n",
			    tq_arg.name, (int)tq_id[i].id, (int)id);
			rc = -EINVAL;
			goto out;
		}

		/* Sleep to let tasks 1-3 start executing. */
		if (i == 2)
			msleep(100);
	}

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME, "Taskq '%s' "
	    "waiting for taskqid %d completion\n", tq_arg.name,
	    SPLAT_TASKQ_ORDER_MAX);
	taskq_wait_id(tq, SPLAT_TASKQ_ORDER_MAX);
	rc = splat_taskq_test_order(&tq_arg, order);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
	    "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return rc;
}
static int
splat_rwlock_test1(struct file *file, void *arg)
{
	int i, count = 0, rc = 0;
	long pids[SPLAT_RWLOCK_TEST_COUNT];
	rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
	rw_priv_t *rwp;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	/* Create some threads, the exact number isn't important just as
	 * long as we know how many we managed to create and should expect. */
	for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
		rwt[i].rwt_rwp = rwp;
		rwt[i].rwt_id = i;
		rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME;

		/* The first thread will be the writer */
		if (i == 0)
			pids[i] = kernel_thread(splat_rwlock_wr_thr,
						&rwt[i], 0);
		else
			pids[i] = kernel_thread(splat_rwlock_rd_thr,
						&rwt[i], 0);

		if (pids[i] >= 0)
			count++;
	}

	/* Wait for the writer */
	while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders == 0)) {
		wake_up_interruptible(&rwp->rw_waitq);
		msleep(100);
	}

	/* Wait for 'count-1' readers */
	while (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters < count - 1)) {
		wake_up_interruptible(&rwp->rw_waitq);
		msleep(100);
	}

	/* Verify there is only one lock holder */
	if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders != 1)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only 1 holder "
			     "expected for rwlock (%d holding/%d waiting)\n",
			     rwp->rw_holders, rwp->rw_waiters);
		rc = -EINVAL;
	}

	/* Verify 'count-1' readers */
	if (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters != count - 1)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d waiters "
			     "expected for rwlock (%d holding/%d waiting)\n",
			     count - 1, rwp->rw_holders, rwp->rw_waiters);
		rc = -EINVAL;
	}

	/* Signal the writer to release, allows readers to acquire */
	spin_lock(&rwp->rw_lock);
	rwp->rw_release = SPLAT_RWLOCK_RELEASE_WR;
	wake_up_interruptible(&rwp->rw_waitq);
	spin_unlock(&rwp->rw_lock);

	/* Wait for 'count-1' readers to hold the lock */
	while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders < count - 1)) {
		wake_up_interruptible(&rwp->rw_waitq);
		msleep(100);
	}

	/* Verify there are 'count-1' readers */
	if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders != count - 1)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d holders "
			     "expected for rwlock (%d holding/%d waiting)\n",
			     count - 1, rwp->rw_holders, rwp->rw_waiters);
		rc = -EINVAL;
	}

	/* Release 'count-1' readers */
	spin_lock(&rwp->rw_lock);
	rwp->rw_release = SPLAT_RWLOCK_RELEASE_RD;
	wake_up_interruptible(&rwp->rw_waitq);
	spin_unlock(&rwp->rw_lock);

	/* Wait for the test to complete */
	while (splat_locked_test(&rwp->rw_lock,
				 rwp->rw_holders > 0 || rwp->rw_waiters > 0))
		msleep(100);

	rw_destroy(&(rwp->rw_rwlock));
	kfree(rwp);

	return rc;
}
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
	    256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
		    "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
	    kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
	        splat_kmem_cache_test_constructor,
	        splat_kmem_cache_test_destructor,
	        splat_kmem_cache_test_reclaim,
	        kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
		    "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			    "Unable to allocate from '%s'\n",
			    SPLAT_KMEM_CACHE_NAME);
		}
	}

	/* Request the slab cache free any objects it can.  For a few reasons
	 * this may not immediately result in more free memory even if objects
	 * are freed.  First off, due to fragmentation we may not be able to
	 * reclaim any slabs.  Secondly, even if we do fully clear some slabs
	 * we will not want to immediately reclaim all of them because we may
	 * contend with cache allocs and thrash.  What we want to see is the
	 * slab size decrease more gradually as it becomes clear they will
	 * not be needed.  This should be achievable in less than a minute;
	 * if it takes longer than this something has gone wrong. */
	for (i = 0; i < 60; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
		    "Successfully created %d objects "
		    "in cache %s and reclaimed them\n",
		    SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
		    "Failed to reclaim %u/%d objects from cache %s\n",
		    (unsigned)kcp->kcp_cache->skc_obj_total,
		    SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
    int size, int align, int flags)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int rc = 0, max;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_kcd[0] = NULL;
	kcp->kcp_cache =
	    kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
	        kcp->kcp_size, kcp->kcp_align,
	        splat_kmem_cache_test_constructor,
	        splat_kmem_cache_test_destructor,
	        NULL, kcp, NULL, flags);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name,
		    "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_free;
	}

	kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
	if (!kcd) {
		splat_vprint(file, name,
		    "Unable to allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kcd[0] = kcd;
	spin_unlock(&kcp->kcp_lock);

	if (!kcp->kcp_kcd[0]->kcd_flag) {
		splat_vprint(file, name,
		    "Failed to run constructor for '%s'\n",
		    SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
		splat_vprint(file, name,
		    "Failed to pass private data to constructor "
		    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	max = kcp->kcp_count;
	spin_lock(&kcp->kcp_lock);
	kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
	kcp->kcp_kcd[0] = NULL;
	spin_unlock(&kcp->kcp_lock);

	/* Destroy the entire cache which will force destructors to
	 * run and we can verify one was called for every object */
	kmem_cache_destroy(kcp->kcp_cache);

	if (kcp->kcp_count) {
		splat_vprint(file, name,
		    "Failed to run destructor on all slab objects "
		    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
	    "Successfully ran ctors/dtors for %d elements in '%s'\n",
	    max, SPLAT_KMEM_CACHE_NAME);

	return rc;

out_free:
	if (kcp->kcp_kcd[0]) {
		spin_lock(&kcp->kcp_lock);
		kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
		kcp->kcp_kcd[0] = NULL;
		spin_unlock(&kcp->kcp_lock);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
static int
splat_taskq_test8_common(struct file *file, void *arg, int minalloc,
    int maxalloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t **tqes;
	int i, j, rc = 0;

	tqes = vmalloc(sizeof(*tqes) * TEST8_NUM_TASKS);
	if (tqes == NULL)
		return -ENOMEM;
	memset(tqes, 0, sizeof(*tqes) * TEST8_NUM_TASKS);

	splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
	    "Taskq '%s' creating (%d/%d/%d)\n",
	    SPLAT_TASKQ_TEST8_NAME,
	    minalloc, maxalloc, TEST8_NUM_TASKS);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST8_NAME, TEST8_THREADS_PER_TASKQ,
	    maxclsyspri, minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST8_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST8_NAME;

	atomic_set(&tq_arg.count, 0);
	for (i = 0; i < TEST8_NUM_TASKS; i++) {
		tqes[i] = kmalloc(sizeof(taskq_ent_t), GFP_KERNEL);
		if (tqes[i] == NULL) {
			rc = -ENOMEM;
			goto out;
		}
		taskq_init_ent(tqes[i]);

		taskq_dispatch_ent(tq, splat_taskq_test8_func,
		    &tq_arg, TQ_SLEEP, tqes[i]);

		id = tqes[i]->tqent_id;

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST8_NAME,
			    "Taskq '%s' function '%s' dispatch "
			    "%d failed\n", tq_arg.name,
			    sym2str(splat_taskq_test8_func), i);
			rc = -EINVAL;
			goto out;
		}
	}

	splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' "
	    "waiting for %d dispatches\n", tq_arg.name, TEST8_NUM_TASKS);
	taskq_wait(tq);
	splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' "
	    "%d/%d dispatches finished\n", tq_arg.name,
	    atomic_read(&tq_arg.count), TEST8_NUM_TASKS);

	if (atomic_read(&tq_arg.count) != TEST8_NUM_TASKS)
		rc = -ERANGE;

out:
	splat_vprint(file, SPLAT_TASKQ_TEST8_NAME, "Taskq '%s' destroying\n",
	    tq_arg.name);
	taskq_destroy(tq);
out_free:
	for (j = 0; j < TEST8_NUM_TASKS && tqes[j] != NULL; j++)
		kfree(tqes[j]);
	vfree(tqes);

	return rc;
}
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
    int size, int alloc, int max_time)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	(void)snprintf(cache_name, 32, "%s-%d-%d",
	    SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
	    kmem_cache_create(cache_name, kcp->kcp_size, 0,
	        splat_kmem_cache_test_constructor,
	        splat_kmem_cache_test_destructor,
	        splat_kmem_cache_test_reclaim,
	        kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	start = current_kernel_time();

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0, splat_kmem_cache_test_thread,
		    kcp, 0, &p0, TS_RUN, minclsyspri);
		if (thr == NULL) {
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
	    splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all threads have finished */
	wait_event(kcp->kcp_ctl_waitq,
	    splat_kmem_cache_test_threads(kcp, 0));

	stop = current_kernel_time();
	delta = timespec_sub(stop, start);

	splat_vprint(file, name, "%-22s %2ld.%09ld\t"
	    "%lu/%lu/%lu\t%lu/%lu/%lu\n", kcp->kcp_cache->skc_name,
	    delta.tv_sec, delta.tv_nsec,
	    (unsigned long)kcp->kcp_cache->skc_slab_total,
	    (unsigned long)kcp->kcp_cache->skc_slab_max,
	    (unsigned long)(kcp->kcp_alloc *
	        SPLAT_KMEM_THREADS / SPL_KMEM_CACHE_OBJ_PER_SLAB),
	    (unsigned long)kcp->kcp_cache->skc_obj_total,
	    (unsigned long)kcp->kcp_cache->skc_obj_max,
	    (unsigned long)(kcp->kcp_alloc * SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= max_time)
		rc = -ETIME;

	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
static int
splat_taskq_test4_common(struct file *file, void *arg, int minalloc,
    int maxalloc, int nr_tasks, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t *tqes;
	int i, j, rc = 0;

	tqes = kmalloc(sizeof(*tqes) * nr_tasks, GFP_KERNEL);
	if (tqes == NULL)
		return -ENOMEM;

	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
	    "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
	    SPLAT_TASKQ_TEST4_NAME,
	    prealloc ? "prealloc" : "dynamic",
	    minalloc, maxalloc, nr_tasks);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST4_NAME, 1, maxclsyspri,
	    minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST4_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST4_NAME;

	for (i = 1; i <= nr_tasks; i *= 2) {
		atomic_set(&tq_arg.count, 0);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		    "Taskq '%s' function '%s' dispatched %d times\n",
		    tq_arg.name, sym2str(splat_taskq_test4_func), i);

		for (j = 0; j < i; j++) {
			taskq_init_ent(&tqes[j]);

			if (prealloc) {
				taskq_dispatch_ent(tq, splat_taskq_test4_func,
				    &tq_arg, TQ_SLEEP, &tqes[j]);
				id = tqes[j].tqent_id;
			} else {
				id = taskq_dispatch(tq, splat_taskq_test4_func,
				    &tq_arg, TQ_SLEEP);
			}

			if (id == 0) {
				splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
				    "Taskq '%s' function '%s' dispatch "
				    "%d failed\n", tq_arg.name,
				    sym2str(splat_taskq_test4_func), j);
				rc = -EINVAL;
				goto out;
			}
		}

		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
		    "waiting for %d dispatches\n", tq_arg.name, i);
		taskq_wait(tq);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
		    "%d/%d dispatches finished\n", tq_arg.name,
		    atomic_read(&tq_arg.count), i);

		if (atomic_read(&tq_arg.count) != i) {
			rc = -ERANGE;
			goto out;
		}
	}
out:
	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' destroying\n",
	    tq_arg.name);
	taskq_destroy(tq);
out_free:
	kfree(tqes);

	return rc;
}
static int
splat_kmem_test9(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
	    256, 0, 0, count);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
		    "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
	    kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
	        splat_kmem_cache_test_constructor,
	        splat_kmem_cache_test_destructor,
	        NULL, kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
		    "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			    "Unable to allocate from '%s'\n",
			    SPLAT_KMEM_CACHE_NAME);
		}
	}

	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < count; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	/* We have allocated a large number of objects, thus creating a
	 * large number of slabs, and then freed them all.  However, since
	 * there should be little memory pressure at the moment those slabs
	 * have not been freed.  What we want to see is the slab size
	 * decrease gradually as it becomes clear they will not be needed.
	 * This should be achievable in less than a minute; if it takes
	 * longer than this something has gone wrong. */
	for (i = 0; i < 60; i++) {
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
		    "Successfully created %d objects "
		    "in cache %s and reclaimed them\n",
		    count, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
		    "Failed to reclaim %u/%d objects from cache %s\n",
		    (unsigned)kcp->kcp_cache->skc_obj_total, count,
		    SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
static int
splat_taskq_test5_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order1[SPLAT_TASKQ_ORDER_MAX] = { 1, 2, 4, 5, 3, 0, 0, 0 };
	int order2[SPLAT_TASKQ_ORDER_MAX] = { 1, 2, 4, 5, 3, 8, 6, 7 };
	taskq_ent_t tqes[SPLAT_TASKQ_ORDER_MAX];
	int i, rc = 0;

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
	    "Taskq '%s' creating (%s dispatch)\n",
	    SPLAT_TASKQ_TEST5_NAME,
	    prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST5_NAME, 3, maxclsyspri,
	    50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		    "Taskq '%s' create failed\n",
		    SPLAT_TASKQ_TEST5_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST5_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test5_func,
			    &tq_id[i], TQ_SLEEP, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test5_func,
			    &tq_id[i], TQ_SLEEP);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			    "Taskq '%s' function '%s' dispatch failed\n",
			    tq_arg.name, sym2str(splat_taskq_test5_func));
			rc = -EINVAL;
			goto out;
		}

		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			    "Taskq '%s' expected taskqid %d got %d\n",
			    tq_arg.name, (int)tq_id[i].id, (int)id);
			rc = -EINVAL;
			goto out;
		}
	}

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
	    "waiting for taskqid %d completion\n", tq_arg.name, 3);
	taskq_wait_id(tq, 3);
	if ((rc = splat_taskq_test_order(&tq_arg, order1)))
		goto out;

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
	    "waiting for taskqid %d completion\n", tq_arg.name, 8);
	taskq_wait_id(tq, 8);
	rc = splat_taskq_test_order(&tq_arg, order2);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
	    "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return rc;
}