Example #1
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
				   "Unable to allocate from '%s'\n",
				   SPLAT_KMEM_CACHE_NAME);
		}
	}

	/* Request the slab cache free any objects it can.  For a few reasons
	 * this may not immediately result in more free memory even if objects
	 * are freed.  First off, due to fragmentation we may not be able to
	 * reclaim any slabs.  Secondly, even if we do fully clear some slabs
	 * we will not want to immediately reclaim all of them because we may
	 * contend with cache allocations and thrash.  What we want to see is
	 * the slab size decrease more gradually as it becomes clear they
	 * will not be needed.  This should be achievable in less than a
	 * minute; if it takes longer than this, something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Successfully created %d objects "
			"in cache %s and reclaimed them\n",
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Failed to reclaim %u/%d objects from cache %s\n",
			(unsigned)kcp->kcp_cache->skc_obj_total,
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
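
Example #1 registers a constructor, destructor, and reclaim callback with
kmem_cache_create() but their bodies are not shown.  The sketch below
illustrates the shape those callbacks are assumed to take (the argument
lists mirror how they are passed above); the function names and bodies are
placeholders for illustration, not the actual splat_kmem_cache_test_*
helpers.

/*
 * Illustrative sketch of the three callbacks passed to kmem_cache_create()
 * above.  The constructor prepares a raw object and returns 0 on success,
 * the destructor undoes that work, and the reclaim hook is invoked under
 * memory pressure (and by kmem_cache_reap_now()) so the cache owner can
 * drop any privately held objects.
 */
static int
example_kmem_ctor(void *ptr, void *priv, int flags)
{
	/* e.g. initialize locks or magic fields inside the object */
	return 0;
}

static void
example_kmem_dtor(void *ptr, void *priv)
{
	/* e.g. verify and tear down whatever the constructor set up */
}

static void
example_kmem_reclaim(void *priv)
{
	/* e.g. release cached objects (such as the kcp_kcd[] slots above)
	 * so their slabs become empty and can be reaped */
}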
Example #2
static int
splat_kmem_test9(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
					      256, 0, 0, count);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
				   "Unable to allocate from '%s'\n",
				   SPLAT_KMEM_CACHE_NAME);
		}
	}

	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < count; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	/* We have allocated a large number of objects, thus creating a
	 * large number of slabs, and then freed them all.  However, since
	 * there should be little memory pressure at the moment, those
	 * slabs have not been freed.  What we want to see is the slab
	 * size decrease gradually as it becomes clear they will not be
	 * needed.  This should be achievable in less than a minute;
	 * if it takes longer than this, something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			"Successfully created %d objects "
			"in cache %s and reclaimed them\n",
			count, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			"Failed to reclaim %u/%d objects from cache %s\n",
			(unsigned)kcp->kcp_cache->skc_obj_total, count,
			SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
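
Stripped of the SPLAT bookkeeping, the cache lifecycle exercised by
Example #2 reduces to the sketch below.  It assumes the same SPL slab API
used above (kmem_cache_create() with a NULL reclaim hook, kmem_cache_alloc()
with KM_SLEEP, kmem_cache_free(), kmem_cache_destroy()); example_obj_t and
the empty callbacks are placeholders, not part of the original code.

typedef struct example_obj {
	char eo_data[256];		/* matches the 256-byte objects used above */
} example_obj_t;

static int example_obj_ctor(void *ptr, void *priv, int flags) { return 0; }
static void example_obj_dtor(void *ptr, void *priv) { }

static int
example_cache_cycle(void)
{
	kmem_cache_t *cache;
	example_obj_t *objs[32];
	int i;

	/* No reclaim callback: empty slabs are left to the cache aging logic. */
	cache = kmem_cache_create("example_cache", sizeof (example_obj_t), 0,
				  example_obj_ctor, example_obj_dtor, NULL,
				  NULL, NULL, 0);
	if (cache == NULL)
		return -ENOMEM;

	/* Allocate enough objects to back several slabs... */
	for (i = 0; i < 32; i++)
		objs[i] = kmem_cache_alloc(cache, KM_SLEEP);

	/* ...then return them all; the now-empty slabs are expected to be
	 * released gradually rather than immediately. */
	for (i = 0; i < 32; i++)
		if (objs[i])
			kmem_cache_free(cache, objs[i]);

	kmem_cache_destroy(cache);

	return 0;
}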
Example #3
/*
 * Validate kmem_cache_reap() by requesting the slab cache free any objects
 * it can.  For a few reasons this may not immediately result in more free
 * memory even if objects are freed.  First off, due to fragmentation we
 * may not be able to reclaim any slabs.  Secondly, even if we do fully
 * clear some slabs, we will not want to immediately reclaim all of them
 * because we may contend with cache allocations and thrash.  What we want
 * to see is the slab size decrease more gradually as it becomes clear they
 * will not be needed.  This should be achievable in less than a minute.
 * If it takes longer than this, something has gone wrong.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	unsigned int spl_kmem_cache_expire_old;
	int i, rc = 0;

	/* Enable cache aging just for this test if it is disabled */
	spl_kmem_cache_expire_old = spl_kmem_cache_expire;
	spl_kmem_cache_expire = KMC_EXPIRE_AGE;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
			     "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	/* Force reclaim every 1/10 of a second for 60 seconds. */
	for (i = 0; i < 600; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_count == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
	}

	if (kcp->kcp_count == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Successfully created %d objects "
			"in cache %s and reclaimed them\n",
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Failed to reclaim %u/%d objects from cache %s\n",
			(unsigned)kcp->kcp_count,
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	spl_kmem_cache_expire = spl_kmem_cache_expire_old;

	return rc;
}
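
All three tests end with the same "poll until the cache drains" loop.  A
sketch of that pattern as a standalone helper follows; kmem_cache_reap_now()
and the skc_obj_total counter are taken from the examples above, while the
helper name and the do_reap flag are assumptions added for illustration.

/*
 * Illustrative helper: poll until the cache holds no objects or the timeout
 * expires, optionally forcing a reap on every pass (as Example #3 does every
 * 1/10 of a second).  Returns 0 once the cache is empty, -ETIMEDOUT otherwise.
 */
static int
example_wait_for_drain(kmem_cache_t *cache, int timeout_sec, int do_reap)
{
	int i;

	for (i = 0; i < timeout_sec * 10; i++) {
		if (do_reap)
			kmem_cache_reap_now(cache);

		if (cache->skc_obj_total == 0)
			return 0;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
	}

	return -ETIMEDOUT;
}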