Example #1
int
zfs_sa_get_xattr(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	char *obj;
	int size;
	int error;

	ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
	ASSERT(!zp->z_xattr_cached);
	ASSERT(zp->z_is_sa);

	error = sa_size(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), &size);
	if (error) {
		if (error == ENOENT)
			return (nvlist_alloc(&zp->z_xattr_cached,
			    NV_UNIQUE_NAME, KM_SLEEP));
		else
			return (error);
	}

	obj = vmem_alloc(size, KM_SLEEP);

	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), obj, size);
	if (error == 0)
		error = nvlist_unpack(obj, size, &zp->z_xattr_cached, KM_SLEEP);

	vmem_free(obj, size);

	return (error);
}
Example #2
static void *
segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
{
	void *addr;
	segkmem_gc_list_t *gcp, **prev_gcpp;

	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
#ifndef __sparc
		if (bootops->bsys_alloc == NULL)
			halt("Memory allocation between bop_alloc() and "
			    "kmem_alloc().\n");
#endif

		/*
		 * There's not a lot of memory to go around during boot,
		 * so recycle it if we can.
		 */
		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
		    prev_gcpp = &gcp->gc_next) {
			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
				*prev_gcpp = gcp->gc_next;
				return (gcp);
			}
		}

		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
			panic("segkmem_alloc: boot_alloc failed");
		return (addr);
	}
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create, vp));
}
Example #3
static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			   "%d byte allocations, %d/%d successful\n",
			   size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}
Example #4
static caddr_t
pci_cfgacc_map(paddr_t phys_addr)
{
#ifdef __xpv
	phys_addr = pfn_to_pa(xen_assign_pfn(mmu_btop(phys_addr))) |
	    (phys_addr & MMU_PAGEOFFSET);
#endif
	if (khat_running) {
		pfn_t pfn = mmu_btop(phys_addr);
		/*
		 * pci_cfgacc_virt_base may hold address left from early
		 * boot, which points to low mem. Realloc virtual address
		 * in kernel space since it's already late in boot now.
		 * Note: no need to unmap first, clear_boot_mappings() will
		 * do that for us.
		 */
		if (pci_cfgacc_virt_base < (caddr_t)kernelbase)
			pci_cfgacc_virt_base = vmem_alloc(heap_arena,
			    MMU_PAGESIZE, VM_SLEEP);

		hat_devload(kas.a_hat, pci_cfgacc_virt_base,
		    MMU_PAGESIZE, pfn, PROT_READ | PROT_WRITE |
		    HAT_STRICTORDER, HAT_LOAD_LOCK);
	} else {
		paddr_t	pa_base = P2ALIGN(phys_addr, MMU_PAGESIZE);

		if (pci_cfgacc_virt_base == NULL)
			pci_cfgacc_virt_base =
			    (caddr_t)alloc_vaddr(MMU_PAGESIZE, MMU_PAGESIZE);

		kbm_map((uintptr_t)pci_cfgacc_virt_base, pa_base, 0, 0);
	}

	return (pci_cfgacc_virt_base + (phys_addr & MMU_PAGEOFFSET));
}
Example #5
int
iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	u_int idx, i;
	int error;

	unit = dmar_ir_find(src, NULL, NULL);
	if (unit == NULL || !unit->ir_enabled) {
		for (i = 0; i < count; i++)
			cookies[i] = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, count, M_FIRSTFIT | M_NOWAIT,
	    &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	for (i = 0; i < count; i++)
		cookies[i] = idx + i;
	return (0);
}
Example #6
static int
zpios_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	zpios_info_t *info;

	if (minor >= ZPIOS_MINORS)
		return -ENXIO;

	info = (zpios_info_t *)kmem_alloc(sizeof(*info), KM_SLEEP);
	if (info == NULL)
		return -ENOMEM;

	spin_lock_init(&info->info_lock);
	info->info_size = ZPIOS_INFO_BUFFER_SIZE;
	info->info_buffer =
	    (char *)vmem_alloc(ZPIOS_INFO_BUFFER_SIZE, KM_SLEEP);
	if (info->info_buffer == NULL) {
		kmem_free(info, sizeof(*info));
		return -ENOMEM;
	}

	info->info_head = info->info_buffer;
	file->private_data = (void *)info;

	return 0;
}
Example #7
static int
xpv_drv_init(void)
{
	if (xpv_feature(XPVF_HYPERCALLS) < 0 ||
	    xpv_feature(XPVF_SHARED_INFO) < 0)
		return (-1);

	/* Set up the grant tables.  */
	gnttab_init();

	/* Set up event channel support */
	if (ec_init() != 0)
		return (-1);

	/* Set up xenbus */
	xb_addr = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
	xs_early_init();
	xs_domu_init();

	/* Set up for suspend/resume/migrate */
	xen_shutdown_tq = taskq_create("shutdown_taskq", 1,
	    maxclsyspri - 1, 1, 1, TASKQ_PREPOPULATE);
	shutdown_watch.node = "control/shutdown";
	shutdown_watch.callback = xen_shutdown_handler;
	if (register_xenbus_watch(&shutdown_watch))
		cmn_err(CE_WARN, "Failed to set shutdown watcher");

	return (0);
}
Example #8
int main(void)
{
    int reps;                                    /* benchmark iterations */
    clock_t cstart1, cend1;
    int my_space;
    vptr foo;
    char *my_ptr;

    Mem::vmem_setup();
    my_space = vmem_create_space();

    printf(" size of struct frame_t = %zu\n", sizeof(frame_t));
    printf(" size of     void * ptr = %zu\n", sizeof(void *));
    printf(" size of     USHORT     = %zu\n", sizeof(USHORT));
    printf(" size of     ULONG      = %zu\n", sizeof(ULONG));
    printf(" size of     USHORT*    = %zu\n", sizeof(USHORT *));
    printf(" size of     ULONG*     = %zu\n", sizeof(ULONG *));

    cstart1 = clock();

    for (reps = 0; reps < 10000; reps++) {
        my_ptr = (char *)vmem_alloc(my_space, 80, &foo);
    }

    cend1 = clock();

    printf("\nvmem struct access :\t\t\t%4.2f seconds \n",
           (float)(cend1 - cstart1) / CLK_TCK);

    return 0;
}
Example #9
static void *
vmem_mmap_alloc(vmem_t *src, size_t size, int vmflags)
{
	void *ret;
	int old_errno = errno;

	ret = vmem_alloc(src, size, vmflags);
#ifndef _WIN32
	if (ret != NULL) {
		if (mmap(ret, size, ALLOC_PROT, ALLOC_FLAGS | MAP_FIXED,
		    -1, 0) == MAP_FAILED) {
			raise_mmap();
			if (mmap(ret, size, ALLOC_PROT,
			    ALLOC_FLAGS | MAP_FIXED, -1, 0) == MAP_FAILED) {
				syslog(LOG_WARNING, "vmem_mmap_alloc: mmap "
				    "still failing after raise_mmap");
				vmem_free(src, ret, size);
				vmem_reap();

				ASSERT((vmflags & VM_NOSLEEP) == VM_NOSLEEP);
				errno = old_errno;
				return (NULL);
			}
		}
	}
#endif

	errno = old_errno;
	return (ret);
}
Example #10
static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	/*
	 * Test up to 4x the maximum kmem_alloc() size to ensure both
	 * the kmem_alloc() and vmem_alloc() call paths are used.
	 */
	while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			   "%d byte allocations, %d/%d successful\n",
			   size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}
Example #11
int
iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
    bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	uint64_t low, iorte;
	u_int idx;
	int error;
	uint16_t rid;

	unit = dmar_find_ioapic(ioapic_id, &rid);
	if (unit == NULL || !unit->ir_enabled) {
		*cookie = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, 1, M_FIRSTFIT | M_NOWAIT, &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	low = 0;
	switch (irq) {
	case IRQ_EXTINT:
		low |= DMAR_IRTE1_DLM_ExtINT;
		break;
	case IRQ_NMI:
		low |= DMAR_IRTE1_DLM_NMI;
		break;
	case IRQ_SMI:
		low |= DMAR_IRTE1_DLM_SMI;
		break;
	default:
		KASSERT(vector != 0, ("No vector for IRQ %u", irq));
		low |= DMAR_IRTE1_DLM_FM | DMAR_IRTE1_V(vector);
		break;
	}
	low |= (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
	    DMAR_IRTE1_DST_xAPIC(cpu)) |
	    (edge ? DMAR_IRTE1_TM_EDGE : DMAR_IRTE1_TM_LEVEL) |
	    DMAR_IRTE1_RH_DIRECT | DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
	dmar_ir_program_irte(unit, idx, low, rid);

	if (hi != NULL) {
		/*
		 * See VT-d specification, 5.1.5.1 I/OxAPIC
		 * Programming.
		 */
		iorte = (1ULL << 48) | ((uint64_t)(idx & 0x7fff) << 49) |
		    ((idx & 0x8000) != 0 ? (1 << 11) : 0) |
		    (edge ? IOART_TRGREDG : IOART_TRGRLVL) |
		    (activehi ? IOART_INTAHI : IOART_INTALO) |
		    IOART_DELFIXED | vector;
		*hi = iorte >> 32;
		*lo = iorte;
	}
	*cookie = idx;
	return (0);
}
Example #12
static int
zpios_ioctl_cmd(struct file *file, unsigned long arg)
{
	zpios_cmd_t *kcmd;
	void *data = NULL;
	int rc = -EINVAL;

	kcmd = kmem_alloc(sizeof (zpios_cmd_t), KM_SLEEP);

	rc = copy_from_user(kcmd, (zpios_cmd_t *)arg, sizeof (zpios_cmd_t));
	if (rc) {
		zpios_print(file, "Unable to copy command structure "
		    "from user to kernel memory, %d\n", rc);
		goto out_cmd;
	}

	if (kcmd->cmd_magic != ZPIOS_CMD_MAGIC) {
		zpios_print(file, "Bad command magic 0x%x != 0x%x\n",
		    kcmd->cmd_magic, ZPIOS_CFG_MAGIC);
		rc = (-EINVAL);
		goto out_cmd;
	}

	/* Allocate memory for any opaque data the caller needed to pass on */
	if (kcmd->cmd_data_size > 0) {
		data = (void *)vmem_alloc(kcmd->cmd_data_size, KM_SLEEP);

		rc = copy_from_user(data, (void *)(arg + offsetof(zpios_cmd_t,
		    cmd_data_str)), kcmd->cmd_data_size);
		if (rc) {
			zpios_print(file, "Unable to copy data buffer "
			    "from user to kernel memory, %d\n", rc);
			goto out_data;
		}
	}

	rc = zpios_do_one_run(file, kcmd, kcmd->cmd_data_size, data);

	if (data != NULL) {
		/* If the test failed do not print out the stats */
		if (rc)
			goto out_data;

		rc = copy_to_user((void *)(arg + offsetof(zpios_cmd_t,
		    cmd_data_str)), data, kcmd->cmd_data_size);
		if (rc) {
			zpios_print(file, "Unable to copy data buffer "
			    "from kernel to user memory, %d\n", rc);
			rc = -EFAULT;
		}

out_data:
		vmem_free(data, kcmd->cmd_data_size);
	}
out_cmd:
	kmem_free(kcmd, sizeof (zpios_cmd_t));

	return (rc);
}
Example #13
int
_init(void)
{
	int e;

	e = mod_install(&modlinkage);
	if (e != 0)
		return (e);

	sbdp_shutdown_va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
	ASSERT(sbdp_shutdown_va != NULL);
	sbdp_valp = (uint64_t *)vmem_alloc(static_alloc_arena,
	    sizeof (uint64_t), VM_SLEEP);

	mutex_init(&sbdp_wnode_mutex, NULL, MUTEX_DRIVER, NULL);
	return (e);
}
Example #14
void
fletcher_4_init(void)
{
	static const size_t data_size = 1 << SPA_OLD_MAXBLOCKSHIFT; /* 128kiB */
	fletcher_4_ops_t *curr_impl;
	char *databuf;
	int i, c;

	/* move supported impl into fletcher_4_supp_impls */
	for (i = 0, c = 0; i < ARRAY_SIZE(fletcher_4_impls); i++) {
		curr_impl = (fletcher_4_ops_t *) fletcher_4_impls[i];

		if (curr_impl->valid && curr_impl->valid())
			fletcher_4_supp_impls[c++] = curr_impl;
	}
	membar_producer();	/* complete fletcher_4_supp_impls[] init */
	fletcher_4_supp_impls_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use last implementation as fastest */
	memcpy(&fletcher_4_fastest_impl,
	    fletcher_4_supp_impls[fletcher_4_supp_impls_cnt-1],
	    sizeof (fletcher_4_fastest_impl));
	fletcher_4_fastest_impl.name = "fastest";
	membar_producer();

	fletcher_4_initialized = B_TRUE;

	/* Use 'cycle' math selection method for userspace */
	VERIFY0(fletcher_4_impl_set("cycle"));
	return;
#endif
	/* Benchmark all supported implementations */
	databuf = vmem_alloc(data_size, KM_SLEEP);
	for (i = 0; i < data_size / sizeof (uint64_t); i++)
		((uint64_t *)databuf)[i] = (uintptr_t)(databuf+i); /* warm-up */

	fletcher_4_benchmark_impl(B_FALSE, databuf, data_size);
	fletcher_4_benchmark_impl(B_TRUE, databuf, data_size);

	vmem_free(databuf, data_size);

	/* install kstats for all implementations */
	fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench", "misc",
		KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
	if (fletcher_4_kstat != NULL) {
		fletcher_4_kstat->ks_data = NULL;
		fletcher_4_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(fletcher_4_kstat,
		    fletcher_4_kstat_headers,
		    fletcher_4_kstat_data,
		    fletcher_4_kstat_addr);
		kstat_install(fletcher_4_kstat);
	}

	/* Finish initialization */
	fletcher_4_initialized = B_TRUE;
}
Example #15
/* Allocate a new matrix object of size rows x cols, from the virtual memory
 * pool.  The elements themselves are uninitialized.
 */
matrix_t * vmalloc_matrix(int rows, int cols) {
    matrix_t *m;

    m = vmem_alloc(sizeof(matrix_t) + rows * cols * sizeof(int));
    m->rows = rows;
    m->cols = cols;

    return m;
}
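A hypothetical caller of the snippet above could look like the following; it assumes the int elements sit immediately after the matrix_t header, which the allocation size suggests but the snippet does not confirm, so treat it purely as an illustration.

matrix_t *m = vmalloc_matrix(3, 4);	/* 3 rows x 4 columns, elements uninitialized */
int *elts = (int *)(m + 1);		/* assumed layout: data directly follows the header */
elts[0 * m->cols + 1] = 42;		/* write row 0, column 1 */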
Example #16
/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object = kernel_object;
	vm_offset_t addr, offset, tmp;
	vm_page_t end_m, m;
	u_long npages;
	int pflags, tries;
 
	size = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	npages = atop(size);
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig_domain(object, atop(offset), domain, pflags,
	    npages, low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			if (!vm_page_reclaim_contig_domain(domain, pflags,
			    npages, low, high, alignment, boundary) &&
			    (flags & M_WAITOK) != 0)
				vm_wait_domain(domain);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	KASSERT(vm_phys_domain(m) == domain,
	    ("kmem_alloc_contig_domain: Domain mismatch %d != %d",
	    vm_phys_domain(m), domain));
	end_m = m + npages;
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
Example #17
/*
 * Functions to allocate node id's starting from 1. Based on vmem routines.
 * The vmem arena is extended in NM_INOQUANT chunks.
 */
uint64_t
namenodeno_alloc(void)
{
	uint64_t nno;

	mutex_enter(&nm_inolock);
	nno = (uint64_t)(uintptr_t)
	    vmem_alloc(nm_inoarena, 1, VM_NOSLEEP + VM_FIRSTFIT);
	if (nno == 0) {
		(void) vmem_add(nm_inoarena, (void *)(vmem_size(nm_inoarena,
		    VMEM_ALLOC | VMEM_FREE) + 1), NM_INOQUANT, VM_SLEEP);
		nno = (uint64_t)(uintptr_t)
		    vmem_alloc(nm_inoarena, 1, VM_SLEEP + VM_FIRSTFIT);
		ASSERT(nno != 0);
	}
	mutex_exit(&nm_inolock);
	ASSERT32(nno <= ULONG_MAX);
	return (nno);
}
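The comment above mentions functions (plural) built on the vmem routines, but only the allocation side is shown. A minimal sketch of the matching release path, assuming the same lock and arena and a hypothetical namenodeno_free() name:

void
namenodeno_free(uint64_t nno)
{
	void *vaddr = (void *)(uintptr_t)nno;

	mutex_enter(&nm_inolock);
	vmem_free(nm_inoarena, vaddr, 1);	/* hand the 1-unit id back to the arena */
	mutex_exit(&nm_inolock);
}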
Example #18
/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic. 
 */
vm_offset_t
kva_alloc(vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}
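For context, FreeBSD releases such a range back through the arena's free routine; a rough sketch of the counterpart, based on the usual kva_free() pairing and shown as an approximation rather than the authoritative source:

void
kva_free(vm_offset_t addr, vm_size_t size)
{
	size = round_page(size);	/* mirror the rounding done on allocation */
	vmem_free(kernel_arena, addr, size);
}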
Example #19
static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}
Example #20
static void *
pool_page_alloc_vmem_meta(struct pool *pp, int flags)
{
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
	vmem_addr_t va;
	int ret;

	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
	    (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va);

	return ret ? NULL : (void *)va;
}
Example #21
static int
kstat_resize_raw(kstat_t *ksp)
{
	if (ksp->ks_raw_bufsize == KSTAT_RAW_MAX)
		return ENOMEM;

	vmem_free(ksp->ks_raw_buf, ksp->ks_raw_bufsize);
	ksp->ks_raw_bufsize = MIN(ksp->ks_raw_bufsize * 2, KSTAT_RAW_MAX);
	ksp->ks_raw_buf = vmem_alloc(ksp->ks_raw_bufsize, KM_SLEEP);

	return 0;
}
Example #22
percpu_t *
percpu_alloc(size_t size)
{
	unsigned int offset;
	percpu_t *pc;

	ASSERT_SLEEPABLE();
	offset = vmem_alloc(percpu_offset_arena, size, VM_SLEEP | VM_BESTFIT);
	pc = (percpu_t *)percpu_encrypt((uintptr_t)offset);
	percpu_zero(pc, size);
	return pc;
}
Example #23
void *
promplat_alloc(size_t size)
{

	mutex_enter(&promplat_lock);
	if (promplat_arena == NULL) {
		promplat_arena = vmem_create("promplat", NULL, 0, 8,
		    segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
	}
	mutex_exit(&promplat_lock);

	return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
}
Example #24
static int
alloc_ppods(struct tom_data *td, int n, u_int *ppod_addr)
{
    vmem_addr_t v;
    int rc;

    MPASS(n > 0);

    rc = vmem_alloc(td->ppod_arena, PPOD_SZ(n), M_NOWAIT | M_FIRSTFIT, &v);
    *ppod_addr = (u_int)v;

    return (rc);
}
Example #25
/*
 *	Allocates a region from the kernel address map and physical pages
 *	within the specified address range to the kernel object.  Creates a
 *	wired mapping from this region to these pages, and returns the
 *	region's starting virtual address.  The allocated pages are not
 *	necessarily physically contiguous.  If M_ZERO is specified through the
 *	given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object = kernel_object;
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig_domain(object, atop(offset + i),
		    domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				if (!vm_page_reclaim_contig_domain(domain,
				    pflags, 1, low, high, PAGE_SIZE, 0) &&
				    (flags & M_WAITOK) != 0)
					vm_wait_domain(domain);
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		KASSERT(vm_phys_domain(m) == domain,
		    ("kmem_alloc_attr_domain: Domain mismatch %d != %d",
		    vm_phys_domain(m), domain));
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
Example #26
/*
 * uvm_emap_alloc: allocate a window.
 */
vaddr_t
uvm_emap_alloc(vsize_t size, bool waitok)
{
	vmem_addr_t addr;

	KASSERT(size > 0);
	KASSERT(round_page(size) == size);

	if (vmem_alloc(uvm_emap_vmem, size,
	    VM_INSTANTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP), &addr) == 0)
		return (vaddr_t)addr;

	return (vaddr_t)0;
}
Example #27
percpu_t *
percpu_alloc(size_t size)
{
	vmem_addr_t offset;
	percpu_t *pc;

	ASSERT_SLEEPABLE();
	if (vmem_alloc(percpu_offset_arena, size, VM_SLEEP | VM_BESTFIT,
	    &offset) != 0)
		return NULL;
	pc = (percpu_t *)percpu_encrypt((uintptr_t)offset);
	percpu_zero(pc, size);
	return pc;
}
Example #28
/*ARGSUSED1*/
static int
mm_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int i;
	struct mem_minor {
		char *name;
		minor_t minor;
		int privonly;
		const char *rdpriv;
		const char *wrpriv;
		mode_t priv_mode;
	} mm[] = {
		{ "mem",	M_MEM,		0,	NULL,	"all",	0640 },
		{ "kmem",	M_KMEM,		0,	NULL,	"all",	0640 },
		{ "allkmem",	M_ALLKMEM,	0,	"all",	"all",	0600 },
		{ "null",	M_NULL,	PRIVONLY_DEV,	NULL,	NULL,	0666 },
		{ "zero",	M_ZERO, PRIVONLY_DEV,	NULL,	NULL,	0666 },
	};
	kstat_t *ksp;

	mutex_init(&mm_lock, NULL, MUTEX_DEFAULT, NULL);
	mm_map = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	for (i = 0; i < (sizeof (mm) / sizeof (mm[0])); i++) {
		if (ddi_create_priv_minor_node(devi, mm[i].name, S_IFCHR,
		    mm[i].minor, DDI_PSEUDO, mm[i].privonly,
		    mm[i].rdpriv, mm[i].wrpriv, mm[i].priv_mode) ==
		    DDI_FAILURE) {
			ddi_remove_minor_node(devi, NULL);
			return (DDI_FAILURE);
		}
	}

	mm_dip = devi;

	ksp = kstat_create("mm", 0, "phys_installed", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VAR_SIZE | KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		ksp->ks_update = mm_kstat_update;
		ksp->ks_snapshot = mm_kstat_snapshot;
		ksp->ks_lock = &mm_lock; /* XXX - not really needed */
		kstat_install(ksp);
	}

	mm_kmem_io_access = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "kmem_io_access", 0);

	return (DDI_SUCCESS);
}
Example #29
static uint16_t
iscsit_tsih_alloc(void)
{
	uintptr_t result;

	result = (uintptr_t)vmem_alloc(iscsit_global.global_tsih_pool,
	    1, VM_NOSLEEP | VM_NEXTFIT);

	/* ISCSI_UNSPEC_TSIH (0) indicates failure */
	if (result > ISCSI_MAX_TSIH) {
		vmem_free(iscsit_global.global_tsih_pool, (void *)result, 1);
		result = ISCSI_UNSPEC_TSIH;
	}

	return ((uint16_t)result);
}
Example #30
/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vmem_t *parent)
{
	vm_offset_t base;

	vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
	vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
	    PAGE_SIZE, 0, M_WAITOK);
	memguard_cursor = base;
	memguard_base = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map size: %jd KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}