Exemplo n.º 1
0
/*
 * Validate slab cache aging.  Allocate a large number of objects from
 * the cache (creating many slabs), free them all, and then verify that
 * the now-idle slabs are released back to the system over time without
 * any explicit reclaim being requested.
 *
 * Returns 0 on success, -ENOMEM if setup or an allocation fails or if
 * the slabs are not aged out within 60 seconds.
 */
static int
splat_kmem_test9(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
					      256, 0, 0, count);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	/* No reclaim callback: aging alone must release the idle slabs. */
	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
				   "Unable to allocate from '%s'\n",
				   SPLAT_KMEM_CACHE_NAME);
			/* Record the failure; previously it was logged
			 * but ignored and the test could still pass.
			 * Keep looping so every kcp_kcd[] slot is
			 * initialized for the free pass below. */
			rc = -ENOMEM;
		}
	}

	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < count; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	if (rc)
		goto out;

	/* We have allocated a large number of objects thus creating a
	 * large number of slabs and then free'd them all.  However since
	 * there should be little memory pressure at the moment those
	 * slabs have not been freed.  What we want to see is the slab
	 * size decrease gradually as it becomes clear they will not
	 * be needed.  This should be achievable in less than a minute;
	 * if it takes longer than this something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			"Successfully created %d objects "
			"in cache %s and reclaimed them\n",
			count, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			"Failed to reclaim %u/%d objects from cache %s\n",
			(unsigned)kcp->kcp_cache->skc_obj_total, count,
			SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

out:
	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
Exemplo n.º 2
0
/*
 * Validate explicit slab reclaim.  Allocate SPLAT_KMEM_OBJ_COUNT
 * objects, then repeatedly call kmem_cache_reap_now() and wait for the
 * registered reclaim callback to drain the cache to zero objects.
 *
 * Returns 0 on success, -ENOMEM if setup or an allocation fails or if
 * the objects are not reclaimed within 60 seconds.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
				   "Unable to allocate from '%s'\n",
				   SPLAT_KMEM_CACHE_NAME);
			/* Record the failure; previously it was logged
			 * but ignored and the test could still pass.
			 * Keep looping so every kcp_kcd[] slot is
			 * initialized for the cleanup pass below. */
			rc = -ENOMEM;
		}
	}

	if (rc)
		goto out_free;

	/* Request the slab cache free any objects it can.  For a few reasons
	 * this may not immediately result in more free memory even if objects
	 * are freed.  First off, due to fragmentation we may not be able to
	 * reclaim any slabs.  Secondly, even if we do fully clear some
	 * slabs we will not want to immediately reclaim all of them because
	 * we may contend with cache allocs and thrash.  What we want to see
	 * is the slab size decrease more gradually as it becomes clear they
	 * will not be needed.  This should be achievable in less than a
	 * minute; if it takes longer than this something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Successfully created %d objects "
			"in cache %s and reclaimed them\n",
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Failed to reclaim %u/%d objects from cache %s\n",
			(unsigned)kcp->kcp_cache->skc_obj_total,
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

out_free:
	/* Cleanup our mess (for failure case of time expiring) */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
Exemplo n.º 3
0
/*
 * Drive a kmem cache from SPLAT_KMEM_THREADS concurrent worker threads
 * and report the elapsed time along with slab and object statistics.
 *
 * Fails with -ENOMEM if setup fails, -ESRCH if a thread cannot be
 * created, -ETIME if the run exceeds max_time seconds, or with any
 * error the workers recorded in kcp->kcp_rc.
 */
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
			     int size, int alloc, int max_time)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	/* Encode object size and per-thread alloc count in the cache name. */
	(void)snprintf(cache_name, 32, "%s-%d-%d",
		       SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
		kmem_cache_create(cache_name, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	start = current_kernel_time();

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0,
				    splat_kmem_cache_test_thread,
				    kcp, 0, &p0, TS_RUN, minclsyspri);
		if (thr == NULL) {
			/* NOTE(review): threads created before this
			 * failure are neither stopped nor waited on,
			 * yet out_cache destroys the cache they use;
			 * verify this cannot race in practice. */
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
		   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all thread have finished */
	wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

	stop = current_kernel_time();
	delta = timespec_sub(stop, start);

	/* Report elapsed time, slab counts (total/max/computed expected)
	 * and object counts (total/max/computed expected). */
	splat_vprint(file, name,
		     "%-22s %2ld.%09ld\t"
		     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
		     kcp->kcp_cache->skc_name,
		     delta.tv_sec, delta.tv_nsec,
		     (unsigned long)kcp->kcp_cache->skc_slab_total,
		     (unsigned long)kcp->kcp_cache->skc_slab_max,
		     (unsigned long)(kcp->kcp_alloc *
				    SPLAT_KMEM_THREADS /
				    SPL_KMEM_CACHE_OBJ_PER_SLAB),
		     (unsigned long)kcp->kcp_cache->skc_obj_total,
		     (unsigned long)kcp->kcp_cache->skc_obj_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= max_time)
		rc = -ETIME;

	/* A timeout takes precedence over any error the workers saw. */
	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
	return rc;
}
Exemplo n.º 4
0
/*
 * Verify basic kmem cache constructor/destructor operation: create a
 * cache, allocate one object, confirm the constructor ran and received
 * the cache private data, free the object, then destroy the cache and
 * confirm a destructor ran for every object (kcp_count drops to 0).
 *
 * Returns 0 on success, -ENOMEM if setup fails, or -EINVAL if any of
 * the constructor/destructor checks fail.
 */
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
		      int size, int align, int flags)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int rc = 0, max;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_kcd[0] = NULL;
	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
				  kcp->kcp_size, kcp->kcp_align,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, flags);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name,
			     "Unable to create '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_free;
	}

	kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
	if (!kcd) {
		splat_vprint(file, name,
			     "Unable to allocate from '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kcd[0] = kcd;
	spin_unlock(&kcp->kcp_lock);

	/* The constructor sets kcd_flag; it must have run. */
	if (!kcp->kcp_kcd[0]->kcd_flag) {
		splat_vprint(file, name,
			     "Failed to run constructor for '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	/* The constructor must have seen the cache private data. */
	if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
		splat_vprint(file, name,
			     "Failed to pass private data to constructor "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	/* Snapshot the live object count before tearing everything down. */
	max = kcp->kcp_count;
	spin_lock(&kcp->kcp_lock);
	kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
	kcp->kcp_kcd[0] = NULL;
	spin_unlock(&kcp->kcp_lock);

	/* Destroy the entire cache which will force destructors to
	 * run and we can verify one was called for every object */
	kmem_cache_destroy(kcp->kcp_cache);
	if (kcp->kcp_count) {
		splat_vprint(file, name,
			     "Failed to run destructor on all slab objects "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
		     "Successfully ran ctors/dtors for %d elements in '%s'\n",
		     max, SPLAT_KMEM_CACHE_NAME);

	return rc;

out_free:
	if (kcp->kcp_kcd[0]) {
		spin_lock(&kcp->kcp_lock);
		kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
		kcp->kcp_kcd[0] = NULL;
		spin_unlock(&kcp->kcp_lock);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
Exemplo n.º 5
0
/*
 * Validate kmem_cache_reap() by requesting the slab cache free any objects
 * it can.  For a few reasons this may not immediately result in more free
 * memory even if objects are freed.  First off, due to fragmentation we
 * may not be able to reclaim any slabs.  Secondly, even if we do we fully
 * clear some slabs we will not want to immediately reclaim all of them
 * because we may contend with cache allocations and thrash.  What we want
 * to see is the slab size decrease more gradually as it becomes clear they
 * will not be needed.  This should be achievable in less than a minute.
 * If it takes longer than this something has gone wrong.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	unsigned int spl_kmem_cache_expire_old;
	int i, rc = 0;

	/* Enable cache aging just for this test if it is disabled */
	spl_kmem_cache_expire_old = spl_kmem_cache_expire;
	spl_kmem_cache_expire = KMC_EXPIRE_AGE;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
			     "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	/* Force reclaim every 1/10 a second for 60 seconds. */
	for (i = 0; i < 600; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_count == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
	}

	if (kcp->kcp_count == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Successfully created %d objects "
			"in cache %s and reclaimed them\n",
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Failed to reclaim %u/%d objects from cache %s\n",
			(unsigned)kcp->kcp_count,
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	spl_kmem_cache_expire = spl_kmem_cache_expire_old;

	return rc;
}
Exemplo n.º 6
0
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
    int size, int align, int flags)
{
	kmem_cache_priv_t *kcp = NULL;
	kmem_cache_data_t **kcd = NULL;
	int i, rc = 0, objs = 0;

	/* Limit size for low memory machines (1/128 of memory) */
	size = MIN(size, (physmem * PAGE_SIZE) >> 7);

	splat_vprint(file, name,
	    "Testing size=%d, align=%d, flags=0x%04x\n",
	    size, align, flags);

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return (-ENOMEM);
	}

	kcp->kcp_cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
	    kcp->kcp_size, kcp->kcp_align,
	    splat_kmem_cache_test_constructor,
	    splat_kmem_cache_test_destructor,
	    NULL, kcp, NULL, flags);
	if (kcp->kcp_cache == NULL) {
		splat_vprint(file, name, "Unable to create "
		    "name='%s', size=%d, align=%d, flags=0x%x\n",
		    SPLAT_KMEM_CACHE_NAME, size, align, flags);
		rc = -ENOMEM;
		goto out_free;
	}

	/*
	 * Allocate several slabs worth of objects to verify functionality.
	 * However, on 32-bit systems with limited address space constrain
	 * it to a single slab for the purposes of this test.
	 */
#ifdef _LP64
	objs = kcp->kcp_cache->skc_slab_objs * 4;
#else
	objs = 1;
#endif
	kcd = kmem_zalloc(sizeof (kmem_cache_data_t *) * objs, KM_SLEEP);
	if (kcd == NULL) {
		splat_vprint(file, name, "Unable to allocate pointers "
		    "for %d objects\n", objs);
		rc = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < objs; i++) {
		kcd[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (kcd[i] == NULL) {
			splat_vprint(file, name, "Unable to allocate "
			    "from '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (!kcd[i]->kcd_flag) {
			splat_vprint(file, name, "Failed to run constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (kcd[i]->kcd_magic != kcp->kcp_magic) {
			splat_vprint(file, name,
			    "Failed to pass private data to constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	for (i = 0; i < objs; i++) {
		kmem_cache_free(kcp->kcp_cache, kcd[i]);

		/* Destructors are run for every kmem_cache_free() */
		if (kcd[i]->kcd_flag) {
			splat_vprint(file, name,
			    "Failed to run destructor for '%s'\n",
			    SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	if (kcp->kcp_count) {
		splat_vprint(file, name,
		    "Failed to run destructor on all slab objects for '%s'\n",
		    SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
	    "Success ran alloc'd/free'd %d objects of size %d\n",
	    objs, size);

	return (rc);

out_free:
	if (kcd) {
		for (i = 0; i < objs; i++) {
			if (kcd[i] != NULL)
				kmem_cache_free(kcp->kcp_cache, kcd[i]);
		}

		kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return (rc);
}