Example #1
int cobalt_umm_init(struct cobalt_umm *umm, u32 size,
		    void (*release)(struct cobalt_umm *umm))
{
	void *basemem;
	int ret;

	secondary_mode_only();

	size = PAGE_ALIGN(size);
	/* Use a non-cached mapping on cache-aliasing architectures so
	 * that kernel and user-space views of the heap stay coherent. */
	basemem = __vmalloc(size, GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO,
			    xnarch_cache_aliasing() ?
			    pgprot_noncached(PAGE_KERNEL) : PAGE_KERNEL);
	if (basemem == NULL)
		return -ENOMEM;

	ret = xnheap_init(&umm->heap, basemem, size);
	if (ret) {
		vfree(basemem);
		return ret;
	}

	umm->release = release;
	atomic_set(&umm->refcount, 1);
	smp_mb();	/* Make the fully built heap visible before first use. */

	return 0;
}
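
A minimal caller sketch for the example above; the handler name, the 64 KiB figure and the setup wrapper are illustrative assumptions, not part of the Cobalt sources:

	/* Hypothetical release handler, invoked once the last reference
	 * to the shared heap is dropped. */
	static void demo_umm_release(struct cobalt_umm *umm)
	{
	}

	static int demo_umm_setup(struct cobalt_umm *umm)
	{
		/* cobalt_umm_init() page-aligns the size itself, so any
		 * non-zero byte count is acceptable here. */
		return cobalt_umm_init(umm, 64 * 1024, demo_umm_release);
	}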
Example #2
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */
	if (heapsize == 0)
		return -EINVAL;

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_DMA32) ? GFP_DMA32 : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNHEAP_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNHEAP_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}
	xnheap_set_label(&heap->heap_base, "rt_heap: %s", name);

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO), NULL);
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(heap->name, heap, &heap->handle,
				       &__heap_pnode.node);

		if (err)
			rt_heap_delete(heap);
	}

	return err;
}
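
A usage sketch for the call above; the heap name, sizes and flags are illustrative, and the rt_heap_alloc() follow-up assumes the Xenomai 2.x native-skin signature (heap, byte count, timeout, output pointer):

	static RT_HEAP demo_heap;

	static int demo_heap_setup(void)
	{
		void *block;
		int err;

		/* 256 KiB kernel-only heap, waiters queued by priority. */
		err = rt_heap_create(&demo_heap, "demo_heap", 256 * 1024, H_PRIO);
		if (err)
			return err;

		/* Allocate a 1 KiB block without blocking. */
		return rt_heap_alloc(&demo_heap, 1024, TM_NONBLOCK, &block);
	}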
Example #3
static xnshm_a_t *create_new_heap(unsigned long name, int heapsize, int suprt)
{
	xnshm_a_t *p;
	int err;

	p = xnheap_alloc(&kheap, sizeof(xnshm_a_t));
	if (!p)
		return NULL;

	p->heap = xnheap_alloc(&kheap, sizeof(xnheap_t));
	if (!p->heap) {
		xnheap_free(&kheap, p);
		return NULL;
	}

	/*
	 * Account for the minimum heap size and overhead so that the
	 * actual free space is large enough to match the requested
	 * size.
	 */

#ifdef CONFIG_XENO_OPT_PERVASIVE
	heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

	err = xnheap_init_mapped(p->heap,
				 heapsize,
				 suprt == USE_GFP_KERNEL ? GFP_KERNEL : 0);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem) {
			err = -ENOMEM;
		} else {
			err = xnheap_init(p->heap, heapmem, heapsize,
					  XNCORE_PAGE_SIZE);
			if (err)
				xnarch_free_host_mem(heapmem, heapsize);
		}
	}
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
	if (err) {
		xnheap_free(&kheap, p->heap);
		xnheap_free(&kheap, p);
		return NULL;
	}

	p->chunk = xnheap_mapped_address(p->heap, 0);

	memset(p->chunk, 0, heapsize);

	inith(&p->link);
	p->ref = 1;
	p->name = name;
	p->size = heapsize;

	return p;
}
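
A caller sketch for the static helper above; the wrapper is hypothetical, but it relies only on the fields the example itself populates (chunk, size, ref):

	static void *demo_attach_heap(unsigned long name, int size)
	{
		xnshm_a_t *p;

		/* The real shm module first searches its list of existing
		 * heaps; this sketch shows only the creation path. */
		p = create_new_heap(name, size, USE_GFP_KERNEL);
		if (p == NULL)
			return NULL;

		/* p->chunk now points at p->size bytes of zeroed,
		 * mappable backing store. */
		return p->chunk;
	}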
Example #4
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */
	if (heapsize == 0)
		return -EINVAL;

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

#ifdef CONFIG_XENO_OPT_PERVASIVE
		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		return -ENOSYS;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNCORE_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO));
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregistry_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__heap_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on entry)
			   from user-space, it gets registered under a unique
			   internal name but is not exported through /proc. */
			xnobject_create_name(heap->name, sizeof(heap->name),
					     (void *)heap);
			pnode = NULL;
		}

		err = xnregistry_enter(heap->name, heap, &heap->handle, pnode);

		if (err)
			rt_heap_delete(heap);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}
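
When H_MAPPABLE is requested as above, a user-space peer of the same skin can attach to the kernel-created heap through the registry; a sketch of that pairing with illustrative names and sizes (rt_heap_bind() is the user-space binding call, an assumption to check against the local headers):

	/* Kernel side: create a heap that user space may map. */
	static RT_HEAP kernel_heap;

	static int demo_export_heap(void)
	{
		return rt_heap_create(&kernel_heap, "demo_shm", 128 * 1024,
				      H_MAPPABLE | H_PRIO);
	}

	/* User side, in a separate program: bind to it by name. */
	static int demo_import_heap(RT_HEAP *h)
	{
		return rt_heap_bind(h, "demo_shm", TM_INFINITE);
	}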