Example #1
int
main(int argc, char *argv[])
{
	const char *text = "Some test text";
	const char *text_empty = "";
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_strdup");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		UT_FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			UT_FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			UT_FATAL("!vmem_create");
	}

	char *str1 = vmem_strdup(vmp, text);
	UT_ASSERTne(str1, NULL);
	UT_ASSERTeq(strcmp(text, str1), 0);

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		UT_ASSERTrange(str1, mem_pool, VMEM_MIN_POOL);
	}

	char *str2 = vmem_strdup(vmp, text_empty);
	UT_ASSERTne(str2, NULL);
	UT_ASSERTeq(strcmp(text_empty, str2), 0);

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		UT_ASSERTrange(str2, mem_pool, VMEM_MIN_POOL);
	}

	vmem_free(vmp, str1);
	vmem_free(vmp, str2);

	vmem_delete(vmp);

	DONE(NULL);
}
Example #2
void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_early_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}
Example #3
static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			   "%d byte allocations, %d/%d successful\n",
			   size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}
Example #4
/*
 * pool_test -- test pool
 *
 * This function creates a memory pool in a file (if dir is not NULL),
 * or in RAM (if dir is NULL) and allocates memory for the test.
 */
void
pool_test(const char *dir)
{
	VMEM *vmp = NULL;

	if (dir != NULL) {
		vmp = vmem_pool_create(dir, VMEM_MIN_POOL);
	} else {
		vmp = vmem_pool_create_in_region(mem_pool, VMEM_MIN_POOL);
	}

	if (expect_create_pool == 0) {
		ASSERTeq(vmp, NULL);
		DONE(NULL);
	} else {
		if (vmp == NULL) {
			if (dir == NULL) {
				FATAL("!vmem_pool_create_in_region");
			} else {
				FATAL("!vmem_pool_create");
			}
		}
	}

	char *test = vmem_malloc(vmp, strlen(TEST_STRING_VALUE) + 1);
	ASSERTne(test, NULL);

	strcpy(test, TEST_STRING_VALUE);
	ASSERTeq(strcmp(test, TEST_STRING_VALUE), 0);

	vmem_free(vmp, test);

	vmem_pool_delete(vmp);
}
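mem_pool and expect_create_pool are file-scope test-harness state, not locals of pool_test(). A minimal sketch of what that setup plausibly looks like, modeled on the other tests in this listing (the names and values here are assumptions, not the original harness):

	/* hypothetical harness state for pool_test() */
	static void *mem_pool;		/* backing region for the in-RAM case */
	static int expect_create_pool;	/* 0 when creation is expected to fail */

	static void
	pool_test_setup(void)
	{
		/* 4MB-aligned anonymous region, as in the other examples */
		mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20);
		expect_create_pool = 1;
	}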
Example #5
int
main(int argc, char *argv[])
{
	VMEM *vmp;
	char *ptr;

	/* create minimum size pool of memory */
	if ((vmp = vmem_create("/pmem-fs",
					VMEM_MIN_POOL)) == NULL) {
		perror("vmem_create");
		exit(1);
	}

	if ((ptr = vmem_malloc(vmp, 100)) == NULL) {
		perror("vmem_malloc");
		exit(1);
	}

	strcpy(ptr, "hello, world");

	/* give the memory back */
	vmem_free(vmp, ptr);

	/* ... */
}
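The snippet above ends at the elided tail of main(); judging from the pool lifecycle in the other libvmem examples here, it would presumably finish by destroying the pool:

	/* sketch of the usual teardown (not part of the original snippet) */
	vmem_delete(vmp);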
Example #6
static void *
vmem_mmap_alloc(vmem_t *src, size_t size, int vmflags)
{
	void *ret;
	int old_errno = errno;

	ret = vmem_alloc(src, size, vmflags);
#ifndef _WIN32
	if (ret != NULL) {
		if (mmap(ret, size, ALLOC_PROT, ALLOC_FLAGS | MAP_FIXED,
		    -1, 0) == MAP_FAILED) {
			raise_mmap();
			if (mmap(ret, size, ALLOC_PROT,
			    ALLOC_FLAGS | MAP_FIXED, -1, 0) == MAP_FAILED) {
				syslog(LOG_WARNING, "vmem_mmap_alloc: mmap "
				    "still failing after raise_mmap");
				vmem_free(src, ret, size);
				vmem_reap();

				ASSERT((vmflags & VM_NOSLEEP) == VM_NOSLEEP);
				errno = old_errno;
				return (NULL);
			}
		}
	}
#endif

	errno = old_errno;
	return (ret);
}
Example #7
void
percpu_free(percpu_t *pc, size_t size)
{

	ASSERT_SLEEPABLE();
	vmem_free(percpu_offset_arena, (vmem_addr_t)percpu_offset(pc), size);
}
Example #8
/*
 *	kva_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kva_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kva_free(vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}
Example #9
void
vmxnet3s_txcache_release(vmxnet3s_softc_t *dp)
{
	int		i;
	int		rc;
	vmxnet3s_txcache_t *cache = &dp->txcache;

	/* Unmap pages */
	hat_unload(kas.a_hat, cache->window, ptob(cache->num_pages),
	    HAT_UNLOAD_UNLOCK);
	vmem_free(heap_arena, cache->window, ptob(cache->num_pages));

	/* Free pages */
	for (i = 0; i < cache->num_pages; i++) {
		rc = page_tryupgrade(cache->pages[i]);
		if (!rc) {
			page_unlock(cache->pages[i]);
			while (!page_lock(cache->pages[i], SE_EXCL, NULL,
			    P_RECLAIM))
				;
		}
		page_free(cache->pages[i], 0);
	}
	page_unresv(cache->num_pages);

	kmem_free(cache->pages, cache->num_pages * sizeof (page_t *));
	kmem_free(cache->page_maps, cache->num_pages * sizeof (page_t *));
	kmem_free(cache->nodes,
	    cache->num_nodes * sizeof (vmxnet3s_txcache_node_t));
}
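The release path above undoes a VA-window setup that is not shown. A rough sketch of the counterpart, assuming the usual illumos pairing of vmem_alloc() on heap_arena with hat_memload() (the exact flags and surrounding code are assumptions):

	/* hypothetical counterpart: reserve the VA window, then map pages */
	cache->window = vmem_alloc(heap_arena, ptob(cache->num_pages),
	    VM_SLEEP);
	for (i = 0; i < cache->num_pages; i++)
		hat_memload(kas.a_hat, cache->window + ptob(i),
		    cache->pages[i], PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);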
Example #10
/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}
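The sized vmem_free() of tx->tx_cpu must mirror the allocation made at setup; in txg_init() the buffer is presumably obtained with a matching sized call (a one-line sketch, not checked against any particular ZFS revision):

	/* matching allocation in txg_init() (sketch) */
	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);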
Example #11
int
main(int argc, char *argv[])
{
	VMEM *vmp;
	size_t i;

	START(argc, argv, "vmem_pool_create_in_region");

	if (argc > 1)
		FATAL("usage: %s", argv[0]);

	vmp = vmem_pool_create_in_region(mem_pool, VMEM_MIN_POOL);

	if (vmp == NULL)
		FATAL("!vmem_pool_create_in_region");

	for (i = 0; i < TEST_ALLOCATIONS; ++i) {
		allocs[i] = vmem_malloc(vmp, sizeof (int));

		ASSERTne(allocs[i], NULL);

		/* check that pointer came from mem_pool */
		ASSERTrange(allocs[i], mem_pool, VMEM_MIN_POOL);
	}

	for (i = 0; i < TEST_ALLOCATIONS; ++i) {
		vmem_free(vmp, allocs[i]);
	}

	vmem_pool_delete(vmp);

	DONE(NULL);
}
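allocs, TEST_ALLOCATIONS, and mem_pool are file-scope definitions outside this excerpt; a plausible shape, with hypothetical values:

	#define	TEST_ALLOCATIONS	100000	/* hypothetical count */
	static int *allocs[TEST_ALLOCATIONS];
	static void *mem_pool;	/* aligned region, e.g. from MMAP_ANON_ALIGNED */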
Example #12
static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	/*
	 * Test up to 4x the maximum kmem_alloc() size to ensure both
	 * the kmem_alloc() and vmem_alloc() call paths are used.
	 */
	while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			   "%d byte allocations, %d/%d successful\n",
			   size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}
Example #13
int
zfs_sa_get_xattr(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	char *obj;
	int size;
	int error;

	ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
	ASSERT(!zp->z_xattr_cached);
	ASSERT(zp->z_is_sa);

	error = sa_size(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), &size);
	if (error) {
		if (error == ENOENT)
			return nvlist_alloc(&zp->z_xattr_cached,
			    NV_UNIQUE_NAME, KM_SLEEP);
		else
			return (error);
	}

	obj = vmem_alloc(size, KM_SLEEP);

	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), obj, size);
	if (error == 0)
		error = nvlist_unpack(obj, size, &zp->z_xattr_cached, KM_SLEEP);

	vmem_free(obj, size);

	return (error);
}
Example #14
static void
zpios_cleanup_run(run_args_t *run_args)
{
	int i, size = 0;

	if (run_args == NULL)
		return;

	if (run_args->threads != NULL) {
		for (i = 0; i < run_args->thread_count; i++) {
			if (run_args->threads[i]) {
				mutex_destroy(&run_args->threads[i]->lock);
				kmem_free(run_args->threads[i],
				    sizeof (thread_data_t));
			}
		}

		kmem_free(run_args->threads,
		    sizeof (thread_data_t *) * run_args->thread_count);
	}

	for (i = 0; i < run_args->region_count; i++)
		mutex_destroy(&run_args->regions[i].lock);

	mutex_destroy(&run_args->lock_work);
	mutex_destroy(&run_args->lock_ctl);
	size = run_args->region_count * sizeof (zpios_region_t);

	vmem_free(run_args, sizeof (*run_args) + size);
}
Example #15
static int
zpios_ioctl_cmd(struct file *file, unsigned long arg)
{
	zpios_cmd_t *kcmd;
	void *data = NULL;
	int rc = -EINVAL;

	kcmd = kmem_alloc(sizeof (zpios_cmd_t), KM_SLEEP);

	rc = copy_from_user(kcmd, (zpios_cmd_t *)arg, sizeof (zpios_cmd_t));
	if (rc) {
		zpios_print(file, "Unable to copy command structure "
		    "from user to kernel memory, %d\n", rc);
		goto out_cmd;
	}

	if (kcmd->cmd_magic != ZPIOS_CMD_MAGIC) {
		zpios_print(file, "Bad command magic 0x%x != 0x%x\n",
		    kcmd->cmd_magic, ZPIOS_CFG_MAGIC);
		rc = (-EINVAL);
		goto out_cmd;
	}

	/* Allocate memory for any opaque data the caller needed to pass on */
	if (kcmd->cmd_data_size > 0) {
		data = (void *)vmem_alloc(kcmd->cmd_data_size, KM_SLEEP);

		rc = copy_from_user(data, (void *)(arg + offsetof(zpios_cmd_t,
		    cmd_data_str)), kcmd->cmd_data_size);
		if (rc) {
			zpios_print(file, "Unable to copy data buffer "
			    "from user to kernel memory, %d\n", rc);
			goto out_data;
		}
	}

	rc = zpios_do_one_run(file, kcmd, kcmd->cmd_data_size, data);

	if (data != NULL) {
		/* If the test failed do not print out the stats */
		if (rc)
			goto out_data;

		rc = copy_to_user((void *)(arg + offsetof(zpios_cmd_t,
		    cmd_data_str)), data, kcmd->cmd_data_size);
		if (rc) {
			zpios_print(file, "Unable to copy data buffer "
			    "from kernel to user memory, %d\n", rc);
			rc = -EFAULT;
		}

out_data:
		vmem_free(data, kcmd->cmd_data_size);
	}
out_cmd:
	kmem_free(kcmd, sizeof (zpios_cmd_t));

	return (rc);
}
Example #16
int
main(int argc, char *argv[])
{
	const int test_value = 123456;
	char *dir = NULL;
	int count = DEFAULT_COUNT;
	int n = DEFAULT_N;
	VMEM *vmp;
	int opt;
	int i, j;
	int use_calloc = 0;

	START(argc, argv, "vmem_pages_purging");

	while ((opt = getopt(argc, argv, "z")) != -1) {
		switch (opt) {
		case 'z':
			use_calloc = 1;
			break;
		default:
			usage(argv[0]);
		}
	}

	if (optind < argc) {
		dir = argv[optind];
	} else {
		usage(argv[0]);
	}

	vmp = vmem_create(dir, VMEM_MIN_POOL);
	if (vmp == NULL)
		FATAL("!vmem_create");

	for (i = 0; i < n; i++) {
		int *test = NULL;
		if (use_calloc)
			test = vmem_calloc(vmp, 1, count * sizeof (int));
		else
			test = vmem_malloc(vmp, count * sizeof (int));
		ASSERTne(test, NULL);

		if (use_calloc) {
			/* vmem_calloc should return zeroed memory */
			for (j = 0; j < count; j++)
				ASSERTeq(test[j], 0);
		}
		for (j = 0; j < count; j++)
			test[j] = test_value;
		for (j = 0; j < count; j++)
			ASSERTeq(test[j], test_value);

		vmem_free(vmp, test);
	}

	vmem_delete(vmp);

	DONE(NULL);
}
Example #17
static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}
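qc_poolpage_free() returns one page-sized span to the arena backing a quantum cache; its allocation-side twin presumably pulls the span from the same arena with NetBSD's address-returning vmem_alloc(). A sketch, assuming a prf_to_vmflags() helper that translates pool flags to vmem flags:

	static void *
	qc_poolpage_alloc(struct pool *pool, int prflags)
	{
		qcache_t *qc = QC_POOL_TO_QCACHE(pool);
		vmem_t *vm = qc->qc_vmem;
		vmem_addr_t addr;

		/* assumed flag translation; exact helper name may differ */
		if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
		    prf_to_vmflags(prflags), &addr) != 0)
			return (NULL);
		return ((void *)addr);
	}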
Example #18
static void
free_ppods(struct tom_data *td, u_int ppod_addr, int n)
{

    MPASS(n > 0);

    vmem_free(td->ppod_arena, (vmem_addr_t)ppod_addr, PPOD_SZ(n));
}
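The FreeBSD page-pod arena above would be filled by the address-returning vmem_alloc() variant; a hedged sketch of the allocation side (policy flags assumed):

	/* hypothetical counterpart to free_ppods() */
	static int
	alloc_ppods(struct tom_data *td, int n, u_int *ppod_addr)
	{
		vmem_addr_t v;

		if (vmem_alloc(td->ppod_arena, PPOD_SZ(n),
		    M_NOWAIT | M_FIRSTFIT, &v) != 0)
			return (ENOMEM);
		*ppod_addr = (u_int)v;
		return (0);
	}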
Example #19
void
fletcher_4_init(void)
{
	static const size_t data_size = 1 << SPA_OLD_MAXBLOCKSHIFT; /* 128kiB */
	fletcher_4_ops_t *curr_impl;
	char *databuf;
	int i, c;

	/* move supported impl into fletcher_4_supp_impls */
	for (i = 0, c = 0; i < ARRAY_SIZE(fletcher_4_impls); i++) {
		curr_impl = (fletcher_4_ops_t *) fletcher_4_impls[i];

		if (curr_impl->valid && curr_impl->valid())
			fletcher_4_supp_impls[c++] = curr_impl;
	}
	membar_producer();	/* complete fletcher_4_supp_impls[] init */
	fletcher_4_supp_impls_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use last implementation as fastest */
	memcpy(&fletcher_4_fastest_impl,
	    fletcher_4_supp_impls[fletcher_4_supp_impls_cnt-1],
	    sizeof (fletcher_4_fastest_impl));
	fletcher_4_fastest_impl.name = "fastest";
	membar_producer();

	fletcher_4_initialized = B_TRUE;

	/* Use 'cycle' math selection method for userspace */
	VERIFY0(fletcher_4_impl_set("cycle"));
	return;
#endif
	/* Benchmark all supported implementations */
	databuf = vmem_alloc(data_size, KM_SLEEP);
	for (i = 0; i < data_size / sizeof (uint64_t); i++)
		((uint64_t *)databuf)[i] = (uintptr_t)(databuf+i); /* warm-up */

	fletcher_4_benchmark_impl(B_FALSE, databuf, data_size);
	fletcher_4_benchmark_impl(B_TRUE, databuf, data_size);

	vmem_free(databuf, data_size);

	/* install kstats for all implementations */
	fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench", "misc",
		KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
	if (fletcher_4_kstat != NULL) {
		fletcher_4_kstat->ks_data = NULL;
		fletcher_4_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(fletcher_4_kstat,
		    fletcher_4_kstat_headers,
		    fletcher_4_kstat_data,
		    fletcher_4_kstat_addr);
		kstat_install(fletcher_4_kstat);
	}

	/* Finish initialization */
	fletcher_4_initialized = B_TRUE;
}
Example #20
int
main(int argc, char *argv[])
{
	const int test_value = 123456;
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_realloc");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP(NULL, VMEM_MIN_POOL, PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create");
	}

	int *test = vmem_realloc(vmp, NULL, sizeof (int));
	ASSERTne(test, NULL);

	test[0] = test_value;
	ASSERTeq(test[0], test_value);

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		ASSERTrange(test, mem_pool, VMEM_MIN_POOL);
	}

	test = vmem_realloc(vmp, test, sizeof (int) * 10);
	ASSERTne(test, NULL);
	ASSERTeq(test[0], test_value);
	test[1] = test_value;
	test[9] = test_value;

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		ASSERTrange(test, mem_pool, VMEM_MIN_POOL);
	}

	vmem_free(vmp, test);

	vmem_delete(vmp);

	DONE(NULL);
}
Example #21
int
main(int argc, char *argv[])
{
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;
	START(argc, argv, "vmem_out_of_memory");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create");
	}

	/* allocate all memory */
	void *prev = NULL;
	for (;;) {
		void **next = vmem_malloc(vmp, sizeof (void *));
		if (next == NULL) {
			/* out of memory */
			break;
		}

		/* check that pointer came from mem_pool */
		if (dir == NULL) {
			ASSERTrange(next, mem_pool, VMEM_MIN_POOL);
		}

		*next = prev;
		prev = next;
	}

	ASSERTne(prev, NULL);

	/* free all allocations */
	while (prev != NULL) {
		void **act = prev;
		prev = *act;
		vmem_free(vmp, act);
	}

	vmem_delete(vmp);

	DONE(NULL);
}
Example #22
/*
 * Delaying the free() of small allocations gets more mileage
 * from pages during boot, otherwise a cycle of allocate/free
 * calls could burn through available heap32 space too quickly.
 */
void
promplat_free(void *p, size_t size)
{
	void *p2 = NULL;
	size_t s2;

	/*
	 * If VM is initialized, clean up any delayed free().
	 */
	if (kvseg.s_base != 0 && promplat_last_free != NULL) {
		mutex_enter(&promplat_lock);
		p2 = promplat_last_free;
		s2 = promplat_last_size;
		promplat_last_free = NULL;
		promplat_last_size = 0;
		mutex_exit(&promplat_lock);
		if (p2 != NULL) {
			vmem_free(promplat_arena, p2, s2);
			p2 = NULL;
		}
	}

	/*
	 * Do the free if VM is initialized or it's a large allocation.
	 */
	if (kvseg.s_base != 0 || size >= PAGESIZE) {
		vmem_free(promplat_arena, p, size);
		return;
	}

	/*
	 * Otherwise, do the last free request and delay this one.
	 */
	mutex_enter(&promplat_lock);
	if (promplat_last_free != NULL) {
		p2 = promplat_last_free;
		s2 = promplat_last_size;
	}
	promplat_last_free = p;
	promplat_last_size = size;
	mutex_exit(&promplat_lock);

	if (p2 != NULL)
		vmem_free(promplat_arena, p2, s2);
}
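The delayed-free scheme pairs with a plain arena allocation on the other side; a minimal sketch of what promplat_alloc() plausibly does (details assumed):

	/* hypothetical allocation-side twin of promplat_free() */
	void *
	promplat_alloc(size_t size)
	{
		return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
	}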
Example #23
void
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
		segkmem_free(vmp, inaddr, size);
	} else {
		vmem_free(kmem_lp_arena, inaddr, size);
	}
}
Example #24
int
main(int argc, char *argv[])
{
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;
	size_t obj_size;
	int *ptr[COUNT];
	int i = 0;
	size_t sum_alloc = 0;

	START(argc, argv, "vmem_mix_allocations");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		UT_FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP_ANON_ALIGNED(POOL_SIZE, 4 << 20);

		vmp = vmem_create_in_region(mem_pool, POOL_SIZE);
		if (vmp == NULL)
			UT_FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, POOL_SIZE);
		if (vmp == NULL)
			UT_FATAL("!vmem_create");
	}

	obj_size = MAX_SIZE;
	/* test with a range of allocation sizes, from 4MB down to 2B */
	for (i = 0; i < COUNT; ++i, obj_size /= 2) {
		ptr[i] = vmem_malloc(vmp, obj_size);

		if (ptr[i] == NULL)
			continue;

		sum_alloc += obj_size;

		/* check that pointer came from mem_pool */
		if (dir == NULL)
			UT_ASSERTrange(ptr[i], mem_pool, POOL_SIZE);
	}

	/* allocate more than half of pool size */
	UT_ASSERT(sum_alloc * 2 > POOL_SIZE);

	while (i > 0)
		vmem_free(vmp, ptr[--i]);

	vmem_delete(vmp);

	DONE(NULL);
}
Example #25
/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object = kernel_object;
	vm_offset_t addr, offset, tmp;
	vm_page_t end_m, m;
	u_long npages;
	int pflags, tries;

	size = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	npages = atop(size);
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig_domain(object, atop(offset), domain, pflags,
	    npages, low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			if (!vm_page_reclaim_contig_domain(domain, pflags,
			    npages, low, high, alignment, boundary) &&
			    (flags & M_WAITOK) != 0)
				vm_wait_domain(domain);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	KASSERT(vm_phys_domain(m) == domain,
	    ("kmem_alloc_contig_domain: Domain mismatch %d != %d",
	    vm_phys_domain(m), domain));
	end_m = m + npages;
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
Example #26
/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver. This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
    void (*func)(page_t *))
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock. Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		if (func)
			func(pp);
		else
			page_destroy(pp, 0);
	}
	if (func == NULL)
		page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}
Example #27
/*
 * uvm_emap_free: free a window.
 */
void
uvm_emap_free(vaddr_t va, size_t size)
{

	KASSERT(va >= uvm_emap_va);
	KASSERT(size <= uvm_emap_size);
	KASSERT(va + size <= uvm_emap_va + uvm_emap_size);

	vmem_free(uvm_emap_vmem, va, size);
}
Example #28
int
main(int argc, char *argv[])
{
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;
	void *alloc;
	size_t usable_size;
	size_t size;
	unsigned i;

	START(argc, argv, "vmem_malloc_usable_size");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP_ANON_ALIGNED(POOL_SIZE, 4 << 20);

		vmp = vmem_create_in_region(mem_pool, POOL_SIZE);
		if (vmp == NULL)
			FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, POOL_SIZE);
		if (vmp == NULL)
			FATAL("!vmem_create");
	}

	ASSERTeq(vmem_malloc_usable_size(vmp, NULL), 0);

	for (i = 0; i < (sizeof (Check_sizes) / sizeof (Check_sizes[0])); ++i) {
		size = Check_sizes[i].size;
		alloc = vmem_malloc(vmp, size);
		ASSERTne(alloc, NULL);
		usable_size = vmem_malloc_usable_size(vmp, alloc);
		ASSERT(usable_size >= size);
		if (usable_size - size > Check_sizes[i].spacing) {
			FATAL("Size %zu: spacing %zu is bigger"
				"than expected: %zu", size,
				(usable_size - size), Check_sizes[i].spacing);
		}
		memset(alloc, 0xEE, usable_size);
		vmem_free(vmp, alloc);
	}

	ASSERTeq(vmem_check(vmp), 1);

	vmem_delete(vmp);

	DONE(NULL);
}
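Check_sizes is declared outside this excerpt; from its uses above (a requested size plus the largest tolerated usable-size overhead), it presumably looks like the following, with illustrative entries:

	/* assumed shape of the Check_sizes table; entries are hypothetical */
	static struct {
		size_t size;	/* requested allocation size */
		size_t spacing;	/* maximum acceptable overhead */
	} Check_sizes[] = {
		{ 8, 8 },
		{ 1024, 128 },
		{ 4096, 512 },
	};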
Example #29
void
namenodeno_free(uint64_t nn)
{
	void *vaddr = (void *)(uintptr_t)nn;

	ASSERT32((uint64_t)(uintptr_t)vaddr == nn);

	mutex_enter(&nm_inolock);
	vmem_free(nm_inoarena, vaddr, 1);
	mutex_exit(&nm_inolock);
}
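Here the arena serves as an ID allocator: each inode number is a single quantum of nm_inoarena, so the free passes size 1. The matching allocator is presumably the mirror image (sketch):

	/* hypothetical counterpart: allocate one inode number */
	uint64_t
	namenodeno_alloc(void)
	{
		uint64_t nn;

		mutex_enter(&nm_inolock);
		nn = (uint64_t)(uintptr_t)vmem_alloc(nm_inoarena, 1, VM_SLEEP);
		mutex_exit(&nm_inolock);
		return (nn);
	}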
Example #30
/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{
	int domain;

	KASSERT(vmem == kernel_arena,
	    ("kmem_free: Only kernel_arena is supported."));
	size = round_page(size);
	domain = _kmem_unback(kernel_object, addr, size);
	vmem_free(vm_dom[domain].vmd_kernel_arena, addr, size);
}
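Taken together, the examples show two distinct vmem_free() signatures. Userspace libvmem identifies an allocation by pool and pointer alone, while the kernel arena allocators (illumos, FreeBSD, NetBSD) require the caller to hand back the size it allocated:

	/* libvmem (userspace): pool + pointer, size tracked internally */
	vmem_free(vmp, ptr);

	/* kernel vmem arenas: arena + address + caller-remembered size */
	vmem_free(arena, addr, size);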