Example #1
/*  ======== dmm_create_tables ========
 *  Purpose:
 *      Create a table to hold information about the physical addresses of
 *      the buffer pages passed in by the user, and a table to hold
 *      information about the virtual memory reserved for the DSP.
 */
int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	status = dmm_delete_tables(dmm_obj);
	if (!status) {
		dyn_mem_map_beg = addr;
		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
		/*  Create the free list */
		virtual_mapping_table = __vmalloc(table_size *
				sizeof(struct map_page), GFP_KERNEL |
				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
		if (virtual_mapping_table == NULL)
			status = -ENOMEM;
		else {
			/* On successful allocation,
			 * all entries are zero ('free') */
			free_region = 0;
			free_size = table_size * PG_SIZE4K;
			virtual_mapping_table[0].region_size = table_size;
		}
	}

	if (status)
		pr_err("%s: failure, status 0x%x\n", __func__, status);

	return status;
}
Example #2
int cobalt_umm_init(struct cobalt_umm *umm, u32 size,
		    void (*release)(struct cobalt_umm *umm))
{
	void *basemem;
	int ret;

	secondary_mode_only();

	size = PAGE_ALIGN(size);
	basemem = __vmalloc(size, GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO,
			    xnarch_cache_aliasing() ?
			    pgprot_noncached(PAGE_KERNEL) : PAGE_KERNEL);
	if (basemem == NULL)
		return -ENOMEM;

	ret = xnheap_init(&umm->heap, basemem, size);
	if (ret) {
		vfree(basemem);
		return ret;
	}

	umm->release = release;
	atomic_set(&umm->refcount, 1);
	smp_mb();

	return 0;
}
Example #3
int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries,
		      gfp_t flags)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt = NULL;

	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	lopt_size += nr_table_entries * sizeof(struct request_sock *);

	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		lopt = kzalloc(lopt_size, flags |
					  __GFP_NOWARN |
					  __GFP_NORETRY);
	if (!lopt)
		lopt = __vmalloc(lopt_size,
			flags | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (!lopt)
		return -ENOMEM;

	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	rwlock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;
	lopt->max_qlen_log = ilog2(nr_table_entries);

	write_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	write_unlock_bh(&queue->syn_wait_lock);

	return 0;
}
Example #4
struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
{
	struct ceph_buffer *b;

	b = kmalloc(sizeof(*b), gfp);
	if (!b)
		return NULL;

	b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN);
	if (b->vec.iov_base) {
		b->is_vmalloc = false;
	} else {
		b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
		if (!b->vec.iov_base) {
			kfree(b);
			return NULL;
		}
		b->is_vmalloc = true;
	}

	kref_init(&b->kref);
	b->alloc_len = len;
	b->vec.iov_len = len;
	dout("buffer_new %p\n", b);
	return b;
}
Example #5
int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt;

	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	lopt_size += nr_table_entries * sizeof(struct request_sock *);
	if (lopt_size > PAGE_SIZE)
		lopt = __vmalloc(lopt_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	else
		lopt = kzalloc(lopt_size, GFP_KERNEL);
	if (lopt == NULL)
		return -ENOMEM;

	for (lopt->max_qlen_log = 3;
	     (1 << lopt->max_qlen_log) < nr_table_entries;
	     lopt->max_qlen_log++);

	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	rwlock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;

	write_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	write_unlock_bh(&queue->syn_wait_lock);

	return 0;
}
Example #6
int tipc_ref_table_init(u32 requested_size, u32 start)
{
	struct reference *table;
	u32 actual_size;

	/* account for unused entry, then round size up to a power of two */

	requested_size++;
	for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
		 ;

	/* allocate the zeroed reference table */

	table = __vmalloc(actual_size * sizeof(struct reference),
			  GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (table == NULL)
		return -ENOMEM;

	tipc_ref_table.entries = table;
	tipc_ref_table.capacity = requested_size;
	tipc_ref_table.init_point = 1;
	tipc_ref_table.first_free = 0;
	tipc_ref_table.last_free = 0;
	tipc_ref_table.index_mask = actual_size - 1;
	tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;

	return 0;
}
Example #7
static inline
void *__brick_block_alloc(gfp_t gfp, int order, int cline)
{
	void *res;
#ifdef CONFIG_MARS_MEM_RETRY
	for (;;) {
#endif
#ifdef USE_KERNEL_PAGES
		res = (void*)__get_free_pages(gfp, order);
#else
		res = __vmalloc(PAGE_SIZE << order, gfp, PAGE_KERNEL_IO);
#endif
#ifdef CONFIG_MARS_MEM_RETRY
		if (likely(res))
			break;
		msleep(1000);
	}
#endif

	if (likely(res)) {
#ifdef CONFIG_MARS_DEBUG_MEM_STRONG
		_new_block_info(res, PAGE_SIZE << order, cline);
#endif
#ifdef BRICK_DEBUG_MEM
		atomic_inc(&phys_block_alloc);
		atomic_inc(&raw_count[order]);
#endif
		atomic64_add((PAGE_SIZE/1024) << order, &brick_global_block_used);
	}

	return res;
}
Example #8
void *libcfs_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}
Example #9
void *ceph_kvmalloc(size_t size, gfp_t flags)
{
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *ptr = kmalloc(size, flags | __GFP_NOWARN);
		if (ptr)
			return ptr;
	}

	return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
}
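Examples #8 and #9 above are the same idea: attempt a physically contiguous kzalloc()/kmalloc() with __GFP_NOWARN first, and fall back to __vmalloc() only when that fails. A minimal generic sketch of the pattern follows; the helper name kv_zalloc_fallback is made up for illustration (recent kernels export kvmalloc()/kvzalloc() for this job), and it uses the same three-argument __vmalloc() signature as the examples in this list.

#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical helper, not taken from any example above: try a quiet
 * contiguous allocation, then fall back to vmalloc space. */
static void *kv_zalloc_fallback(size_t size, gfp_t flags)
{
	void *p;

	/* __GFP_NOWARN/__GFP_NORETRY: failure here is expected and handled. */
	p = kzalloc(size, flags | __GFP_NOWARN | __GFP_NORETRY);
	if (p)
		return p;

	/* Virtually contiguous, zeroed fallback. */
	return __vmalloc(size, flags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
}

Whichever form the helper takes, the matching free path has to tell the two cases apart, for example with is_vmalloc_addr() (vfree() vs. kfree()), or by freeing through kvfree().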
Example #10
/**
 * self_check_write - make sure write succeeded.
 * @ubi: UBI device description object
 * @buf: buffer with data which were written
 * @pnum: physical eraseblock number the data were written to
 * @offset: offset within the physical eraseblock the data were written to
 * @len: how many bytes were written
 *
 * This functions reads data which were recently written and compares it with
 * the original data buffer - the data have to match. Returns zero if the data
 * match and a negative error code if not or in case of failure.
 */
static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
			    int offset, int len)
{
	int err, i;
	size_t read;
	void *buf1;
	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
	if (!buf1) {
		ubi_err("cannot allocate memory to check writes");
		return 0;
	}

	err = mtd_read(ubi->mtd, addr, len, &read, buf1);
	if (err && !mtd_is_bitflip(err))
		goto out_free;

	for (i = 0; i < len; i++) {
		uint8_t c = ((uint8_t *)buf)[i];
		uint8_t c1 = ((uint8_t *)buf1)[i];
#if !defined(CONFIG_UBI_SILENCE_MSG)
		int dump_len = max_t(int, 128, len - i);
#endif

		if (c == c1)
			continue;

		ubi_err("self-check failed for PEB %d:%d, len %d",
			pnum, offset, len);
		ubi_msg("data differ at position %d", i);
		ubi_msg("hex dump of the original buffer from %d to %d",
			i, i + dump_len);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       buf + i, dump_len, 1);
		ubi_msg("hex dump of the read buffer from %d to %d",
			i, i + dump_len);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       buf1 + i, dump_len, 1);
		dump_stack();
		err = -EINVAL;
		goto out_free;
	}

	vfree(buf1);
	return 0;

out_free:
	vfree(buf1);
	return err;
}
Example #11
/*
 * The allocated block is sizeof(struct listen_sock) +
 * nr_table_entries * sizeof(struct request_sock *), so syn_table occupies
 * the trailing nr_table_entries * sizeof(struct request_sock *) bytes;
 * that space holds only the hash-table bucket heads.
 */
int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt;

	/*
	 * Take the minimum of the caller-supplied queue-length limit
	 * nr_table_entries and sysctl_max_syn_backlog, the system-wide cap on
	 * SYN requests that have not yet completed the three-way handshake.
	 * Both bound the length of the request queue; the former applies to a
	 * single transport control block, the latter is global.
	 *
	 * As a result, listen_sock->max_qlen_log ends up as the log2 of the
	 * smaller of the two values, plus one, rounded up to a power of two.
	 * For example, with nr_table_entries = 128 and
	 * sysctl_max_syn_backlog = 20480:
	 *     min(nr_table_entries, sysctl_max_syn_backlog) = 128
	 *     roundup_pow_of_two(128 + 1) = 256
	 *     max_qlen_log = 8
	 * (A standalone sketch of this arithmetic follows the function.)
	 */
	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	/*
	 * Call roundup_pow_of_two() so that nr_table_entries is a power of two.
	 */
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	/*
	 * Compute the size of the listen_sock structure used to hold the
	 * pending SYN requests.  Note that struct request_sock * is a
	 * pointer, so each table entry is only pointer-sized.
	 */
	lopt_size += nr_table_entries * sizeof(struct request_sock *);
	if (lopt_size > PAGE_SIZE)
		lopt = __vmalloc(lopt_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	else
		lopt = kzalloc(lopt_size, GFP_KERNEL); 
	if (lopt == NULL)
		return -ENOMEM;
	/*
	 * The block just allocated is sizeof(struct listen_sock) +
	 * nr_table_entries * sizeof(struct request_sock *); syn_table covers
	 * the trailing part and holds only the hash-table bucket heads.
	 */
	for (lopt->max_qlen_log = 3;
	     (1 << lopt->max_qlen_log) < nr_table_entries;
	     lopt->max_qlen_log++);

	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	rwlock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;

	write_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt; /* queue->listen_opt now points at the new listen_sock */
	write_unlock_bh(&queue->syn_wait_lock);

	return 0;
}
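To make the layout and arithmetic described in the comments above concrete, here is a small standalone sketch. The struct below is a simplified, illustrative view (only the two relevant fields are shown), and example_max_qlen_log is a made-up helper that restates the computation; the loop in the function above produces the same value that Example #3 gets from ilog2().

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/log2.h>

/* Simplified view of the allocation: the request_sock pointers (the hash
 * bucket heads) sit directly after the fixed-size listen_sock header. */
struct listen_sock_layout {
	u8			max_qlen_log;
	/* ... remaining listen_sock fields ... */
	struct request_sock	*syn_table[0];	/* nr_table_entries bucket heads */
};

/* Restates the worked example: backlog = 128, sysctl limit = 20480:
 *   min(128, 20480) = 128, max(128, 8) = 128,
 *   roundup_pow_of_two(128 + 1) = 256, ilog2(256) = 8. */
static u32 example_max_qlen_log(u32 backlog, u32 sysctl_max)
{
	u32 n = min_t(u32, backlog, sysctl_max);

	n = max_t(u32, n, 8);
	n = roundup_pow_of_two(n + 1);
	return ilog2(n);
}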
Example #12
static void *
repl___vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
    void *ret_val;
    ret_val = __vmalloc(size, gfp_mask, prot);

    if (ret_val != NULL)
        klc_add_alloc(ret_val, size, stack_depth);

    return ret_val;
}
Example #13
static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL_ACCOUNT |
				     __GFP_NOWARN | __GFP_NORETRY);
		if (data != NULL)
			return data;
	}
	return __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM, PAGE_KERNEL);
}
Example #14
struct hlist_head *xfrm_hash_alloc(unsigned int sz)
{
    struct hlist_head *n;

    if (sz <= PAGE_SIZE)
        n = kzalloc(sz, GFP_KERNEL);
    else if (hashdist)
        n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
    else
        n = (struct hlist_head *)
            __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                             get_order(sz));

    return n;
}
Example #15
File: groups.c Project: mdamt/linux
struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *gi;
	unsigned int len;

	len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize;
	gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY);
	if (!gi)
		gi = __vmalloc(len, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
	if (!gi)
		return NULL;

	atomic_set(&gi->usage, 1);
	gi->ngroups = gidsetsize;
	return gi;
}
Example #16
static int allocate_dsa(void)
{
	free_dsa();

	g_dsa = __vmalloc(sizeof(struct px_tp_dsa),
                          GFP_KERNEL,
                          pgprot_noncached(PAGE_KERNEL));

	if (g_dsa == NULL)
	{
		return -ENOMEM;
	}

	memset(g_dsa, 0, sizeof(struct px_tp_dsa));

	return 0;
}
Example #17
void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
Example #18
File: io.c Project: 383530895/linux
/**
 * ubi_self_check_all_ff - check that a region of flash is empty.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @offset: the starting offset within the physical eraseblock to check
 * @len: the length of the region to check
 *
 * This function returns zero if only 0xFF bytes are present at offset
 * @offset of the physical eraseblock @pnum, and a negative error code if not
 * or if an error occurred.
 */
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
	size_t read;
	int err;
	void *buf;
	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
	if (!buf) {
		ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
		return 0;
	}

	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err && !mtd_is_bitflip(err)) {
		ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, len, pnum, offset, read);
		goto error;
	}

	err = ubi_check_pattern(buf, 0xFF, len);
	if (err == 0) {
		ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
			pnum, offset, len);
		goto fail;
	}

	vfree(buf);
	return 0;

fail:
	ubi_err(ubi, "self-check failed for PEB %d", pnum);
	ubi_msg(ubi, "hex dump of the %d-%d region",
		 offset, offset + len);
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
	err = -EINVAL;
error:
	dump_stack();
	vfree(buf);
	return err;
}
Example #19
static unsigned long *alloc_bitmap(u32 bitmap_size)
{
    void *mem;

    /*
     * The allocation size varies, observed numbers were < 4K up to 16K.
     * Using vmalloc unconditionally would be too heavy, we'll try
     * contiguous allocations first.
     */
    if  (bitmap_size <= PAGE_SIZE)
        return kzalloc(bitmap_size, GFP_NOFS);

    mem = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN);
    if (mem)
        return mem;

    return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
                     PAGE_KERNEL);
}
Example #20
0
int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
				      size_t size, gfp_t gfp_flags)
{
	struct snd_pcm_runtime *runtime;

	if (PCM_RUNTIME_CHECK(substream))
		return -EINVAL;
	runtime = substream->runtime;
	if (runtime->dma_area) {
		if (runtime->dma_bytes >= size)
			return 0; /* already large enough */
		vfree(runtime->dma_area);
	}
	runtime->dma_area = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (!runtime->dma_area)
		return -ENOMEM;
	runtime->dma_bytes = size;
	return 1;
}
Example #21
File: mem.c Project: nkedel/MRAMFS
void * mfs_saAllocateRegion( int mb ) {
  void * region;
  long size = mb * MEG;
  if ( mb < 4 ) {
    TRACE( "mfs_saAllocateRegion called without a size or with size < 4mb.\n" );
    return NULL;
  }
  if ( mb > 1024 ) {
    TRACE( "mfs_saAllocateRegion called with a size > 1 gb\n" );
    return NULL;
  }
  region = __vmalloc( size, GFP_NOIO, PAGE_KERNEL );
  if ( region == NULL ) {
    TRACE( "mfs_saAllocateRegion failed to allocate memory\n" );
    return NULL;
  }
  TRACE( "mfs_saAllocateRegion allocated %d mb @ %08x\n", mb, ( unsigned int ) region );
  memset( region, 0, 65536 );
  return region;
}
Example #22
static void *zcomp_lz4_create(void)
{
	void *ret;

	/*
	 * This function can be called in swapout/fs write path
	 * so we can't use GFP_FS|IO. And it assumes we already
	 * have at least one stream in zram initialization so we
	 * don't do best effort to allocate more stream in here.
	 * A default stream will work well without further multiple
	 * streams. That's why we use NORETRY | NOWARN.
	 */
	ret = kzalloc(LZ4_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
					__GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(LZ4_MEM_COMPRESS,
				GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
				__GFP_ZERO | __GFP_HIGHMEM,
				PAGE_KERNEL);
	return ret;
}
Example #23
void *wrap__vmalloc(unsigned long size, unsigned int gfp_mask, pgprot_t prot,
		    const char *file, int line)
{
	struct alloc_info *info;
	info = __vmalloc(size + sizeof(*info), gfp_mask, prot);
	if (!info)
		return NULL;
	info->type = ALLOC_TYPE_VMALLOC;
	info->size = size;
	atomic_add(size, &alloc_sizes[info->type]);
#if ALLOC_DEBUG > 1
	info->file = file;
	info->line = line;
#if ALLOC_DEBUG > 2
	info->tag = 0;
#endif
	nt_spin_lock(&alloc_lock);
	InsertTailList(&allocs, &info->list);
	nt_spin_unlock(&alloc_lock);
#endif
	return (info + 1);
}
Example #24
void *wrap__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
		    const char *file, int line)
{
	struct alloc_info *info;

	info = __vmalloc(size + sizeof(*info), gfp_mask, prot);
	if (!info)
		return NULL;
	if (gfp_mask & GFP_ATOMIC)
		info->type = ALLOC_TYPE_VMALLOC_ATOMIC;
	else
		info->type = ALLOC_TYPE_VMALLOC_NON_ATOMIC;
	info->size = size;
	atomic_add(size, &alloc_sizes[info->type]);
#if ALLOC_DEBUG > 1
	info->file = file;
	info->line = line;
	info->tag = 0;
	spin_lock_bh(&alloc_lock);
	InsertTailList(&allocs, &info->list);
	spin_unlock_bh(&alloc_lock);
#endif
	return info + 1;
}
Example #25
BCE_ERROR BCAllocDiscontigMemory(unsigned long ulSize,
                              BCE_HANDLE unref__ *phMemHandle,
                              IMG_CPU_VIRTADDR *pLinAddr,
                              IMG_SYS_PHYADDR **ppPhysAddr)
{
	unsigned long ulPages = RANGE_TO_PAGES(ulSize);
	IMG_SYS_PHYADDR *pPhysAddr;
	unsigned long ulPage;
	IMG_CPU_VIRTADDR LinAddr;

	LinAddr = __vmalloc(ulSize, GFP_KERNEL | __GFP_HIGHMEM, pgprot_noncached(PAGE_KERNEL));
	if (!LinAddr)
	{
		return BCE_ERROR_OUT_OF_MEMORY;
	}

	pPhysAddr = kmalloc(ulPages * sizeof(IMG_SYS_PHYADDR), GFP_KERNEL);
	if (!pPhysAddr)
	{
		vfree(LinAddr);
		return BCE_ERROR_OUT_OF_MEMORY;
	}

	*pLinAddr = LinAddr;

	for (ulPage = 0; ulPage < ulPages; ulPage++)
	{
		pPhysAddr[ulPage].uiAddr = VMALLOC_TO_PAGE_PHYS(LinAddr);

		LinAddr += PAGE_SIZE;
	}

	*ppPhysAddr = pPhysAddr;

	return BCE_OK;
}
Example #26
void *_VMallocWrapper(u32 ui32Bytes, u32 ui32AllocFlags, char *pszFileName,
		      u32 ui32Line)
{
	pgprot_t PGProtFlags;
	void *pvRet;

	switch (ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK) {
	case PVRSRV_HAP_CACHED:
		PGProtFlags = PAGE_KERNEL;
		break;
	case PVRSRV_HAP_WRITECOMBINE:
		PGProtFlags = PGPROT_WC(PAGE_KERNEL);
		break;
	case PVRSRV_HAP_UNCACHED:
		PGProtFlags = PGPROT_UC(PAGE_KERNEL);
		break;
	default:
		PVR_DPF(PVR_DBG_ERROR,
			 "VMAllocWrapper: unknown mapping flags=0x%08lx",
			 ui32AllocFlags);
		dump_stack();
		return NULL;
	}

	pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	if (pvRet)
		DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
				       pvRet, pvRet, 0, NULL,
				       PAGE_ALIGN(ui32Bytes),
				       pszFileName, ui32Line);
#endif

	return pvRet;
}
Example #27
/**
 * OS specific allocation function.
 */
DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
{
    PRTMEMHDR pHdr;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Allocate.
     */
    if (fFlags & RTMEMHDR_FLAG_EXEC)
    {
        if (fFlags & RTMEMHDR_FLAG_ANY_CTX)
            return VERR_NOT_SUPPORTED;

#if defined(RT_ARCH_AMD64)
# ifdef RTMEMALLOC_EXEC_HEAP
        if (g_HeapExec != NIL_RTHEAPSIMPLE)
        {
            RTSpinlockAcquire(g_HeapExecSpinlock);
            pHdr = (PRTMEMHDR)RTHeapSimpleAlloc(g_HeapExec, cb + sizeof(*pHdr), 0);
            RTSpinlockRelease(g_HeapExecSpinlock);
            fFlags |= RTMEMHDR_FLAG_EXEC_HEAP;
        }
        else
            pHdr = NULL;

# elif defined(RTMEMALLOC_EXEC_VM_AREA)
        pHdr = rtR0MemAllocExecVmArea(cb);
        fFlags |= RTMEMHDR_FLAG_EXEC_VM_AREA;

# else  /* !RTMEMALLOC_EXEC_HEAP */
# error "you do not want to go here..."
        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
# endif /* !RTMEMALLOC_EXEC_HEAP */

#elif defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
#else
        pHdr = (PRTMEMHDR)vmalloc(cb + sizeof(*pHdr));
#endif
    }
    else
    {
        if (
#if 1 /* vmalloc has serious performance issues, avoid it. */
               cb <= PAGE_SIZE*16 - sizeof(*pHdr)
#else
               cb <= PAGE_SIZE
#endif
            || (fFlags & RTMEMHDR_FLAG_ANY_CTX)
           )
        {
            fFlags |= RTMEMHDR_FLAG_KMALLOC;
            pHdr = kmalloc(cb + sizeof(*pHdr),
                           (fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC) ? (GFP_ATOMIC | __GFP_NOWARN)
                                                                  : (GFP_KERNEL | __GFP_NOWARN));
            if (RT_UNLIKELY(   !pHdr
                            && cb > PAGE_SIZE
                            && !(fFlags & RTMEMHDR_FLAG_ANY_CTX) ))
            {
                fFlags &= ~RTMEMHDR_FLAG_KMALLOC;
                pHdr = vmalloc(cb + sizeof(*pHdr));
            }
        }
        else
            pHdr = vmalloc(cb + sizeof(*pHdr));
    }
    if (RT_UNLIKELY(!pHdr))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    /*
     * Initialize.
     */
    pHdr->u32Magic  = RTMEMHDR_MAGIC;
    pHdr->fFlags    = fFlags;
    pHdr->cb        = cb;
    pHdr->cbReq     = cb;

    *ppHdr = pHdr;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
Example #28
void *vmalloc_uncached (unsigned long size)
{
	return __vmalloc (size, GFP_KERNEL | __GFP_HIGHMEM,
	                  PAGE_KERNEL_UNCACHED);
}
Example #29
/* allocate executable and writable memory */
void *memory_alloc_exec(word size)
{
	heap_t *found_heap = NULL;
	heap_t *new_heap = NULL;
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&memory_lock, flags);
	found_heap = heaps;
	size = ROUNDUP(size, sizeof(word));
	if (size < sizeof(word)) {
		goto unlock;
	}

	if (size >= ((PAGE_SIZE - sizeof(heap_t)) / 2)) {
		/* big buffers receive a whole page */
#ifdef __KERNEL__
		ret = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL_EXEC);
		if (NULL == ret) {
#else
		ret = mmap(NULL, ROUNDUP(size, PAGE_SIZE), PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if ((sword)ret == -1) {
#endif
			ret = NULL;
		}
		goto unlock;
	}

	/* find the heap with elements big enough for us */
	while (NULL != found_heap) {
		if (found_heap->elem_size >= size && found_heap->allocated < found_heap->num_elem) {
			break;
		}
		found_heap = found_heap->next;
	}

	if (NULL == found_heap) {
		/* we need to create a new heap */
#ifdef __KERNEL__
		new_heap = __vmalloc(PAGE_SIZE, GFP_ATOMIC, PAGE_KERNEL_EXEC);
		if (NULL == new_heap) {
#else
		new_heap = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if ((sword)new_heap == -1) {
#endif
			goto unlock;
		}

		memory_set(new_heap, 0, PAGE_SIZE);
		new_heap->next = NULL;
		new_heap->elem_size = size;
		new_heap->allocated = 1;
		new_heap->num_elem = (PAGE_SIZE - sizeof(heap_t)) / size;
		new_heap->first_elem = (byte *)new_heap + sizeof(heap_t) + size;
		if (heaps == NULL) {
			heaps = new_heap;
		} else {
			found_heap = heaps;
			while (NULL != found_heap->next) {
				found_heap = found_heap->next;
			}
			found_heap->next = new_heap;
		}
		/* return the first element */
		ret = (void *)((byte *)new_heap + sizeof(heap_t));
		goto unlock;
	} else {
		/* we found a heap */
		found_heap->allocated++;
		ret = found_heap->first_elem;
		if (found_heap->allocated < found_heap->num_elem) {
			/* update the next element to be returned */
			if (*(byte **)(found_heap->first_elem) == NULL) {
				found_heap->first_elem += found_heap->elem_size;
			} else {
				found_heap->first_elem = *(byte **)(found_heap->first_elem);
			}
		}
		goto unlock;
	}
	/* we should never get here */
	ret = NULL;
unlock:
	spin_unlock_irqrestore(&memory_lock, flags);
	return ret;
}

/* free an executable buffer */
void memory_free_exec(void *mem)
{
	heap_t *found_heap = NULL;
	heap_t *del_heap = NULL;
	unsigned long flags;

	spin_lock_irqsave(&memory_lock, flags);
	found_heap = (heap_t *)(ROUNDDOWN((word)(mem), PAGE_SIZE));
	if (((word)mem & (PAGE_SIZE - 1)) == 0) {
		/* if the buffer is page aligned, it is a large buffer */
#ifdef __KERNEL__
		vfree(mem);
#else
		munmap(mem, PAGE_SIZE);
#endif
		goto unlock;
	}

	found_heap->allocated--;
	if (0 == found_heap->allocated) {
		/* the heap is now empty. we free it: */
		if (heaps == found_heap) {
			heaps = found_heap->next;
		} else {
			del_heap = heaps;
			while (del_heap->next != found_heap) {
				del_heap = del_heap->next;
			}
			del_heap->next = found_heap->next;
		}
#ifdef __KERNEL__
		vfree(found_heap);
#else
		munmap(found_heap, PAGE_SIZE);
#endif
	} else {
		/* the heap still has buffers in use, put this buffer in the list */
		*(byte **)mem = found_heap->first_elem;
		found_heap->first_elem = mem;
	}
unlock:
	spin_unlock_irqrestore(&memory_lock, flags);
}
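A brief sketch of how this allocate/free pair would typically be used; the stub, its size, and the run_stub wrapper are made up for illustration, and architecture-specific cache maintenance is omitted:

/* Hypothetical caller: copy a small code stub into executable memory,
 * jump to it, then hand the buffer back to the allocator. */
static int run_stub(const void *stub, word stub_size)
{
	void (*fn)(void);
	void *exec_buf = memory_alloc_exec(stub_size);

	if (exec_buf == NULL)
		return -1;

	memcpy(exec_buf, stub, stub_size);
	fn = (void (*)(void))exec_buf;
	fn();

	memory_free_exec(exec_buf);
	return 0;
}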
Example #30
/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}