Code Example #1
/**
 * Unlink a shared memory object.
 *
 * This service unlinks the shared memory object named @a name. The shared
 * memory object is not destroyed until every file descriptor obtained with the
 * shm_open() service is closed with the close() service and all mappings done
 * with mmap() are unmapped with munmap(). However, after a call to this
 * service, the unlinked shared memory object may no longer be reached 
 * with the shm_open() service.
 *
 * @param name name of the shared memory object to be unlinked.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EPERM, the caller context is invalid;
 * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
 * - ENOENT, the shared memory object does not exist.
 * 
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - kernel-space cancellation cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode);
 * - user-space cancellation cleanup routine.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/shm_unlink.html">
 * Specification.</a>
 * 
 */
int shm_unlink(const char *name)
{
	pse51_node_t *node;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	if (xnpod_interrupt_p() || !xnpod_root_p()) {
		err = EPERM;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);

	err = pse51_node_remove(&node, name, PSE51_SHM_MAGIC);

	if (err) {
		xnlock_put_irqrestore(&nklock, s);
	      error:
		thread_set_errno(err);
		return -1;
	}

	shm = node2shm(node);
	pse51_shm_put(shm, 0);

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
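
A minimal user-space sketch of these unlink semantics (hedged: "/my-shm" is a hypothetical object name, and this assumes a program linked against the POSIX skin, or plain librt):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = shm_open("/my-shm", O_CREAT | O_RDWR, 0666);
	if (fd < 0) {
		perror("shm_open");
		return 1;
	}

	/* The name disappears immediately, so a later shm_open("/my-shm",
	   O_RDWR, 0) would fail with ENOENT; the object itself survives
	   until the last descriptor is closed and the last mapping is
	   unmapped. */
	if (shm_unlink("/my-shm"))
		perror("shm_unlink");

	close(fd);
	return 0;
}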
Code Example #2
int rt_heap_delete_inner(RT_HEAP *heap, void __user *mapaddr)
{
	int err = 0;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	xeno_mark_deleted(heap);

	/* Get out of the nklocked section before releasing the heap
	   memory, since we are about to invoke Linux kernel
	   services. */

	xnlock_put_irqrestore(&nklock, s);

	/*
	 * The heap descriptor has been marked as deleted before we
	 * released the superlock, thus preventing any subsequent
	 * call to rt_heap_delete() from succeeding, so now we can
	 * actually destroy it safely.
	 */

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (heap->mode & H_MAPPABLE)
		err = xnheap_destroy_mapped(&heap->heap_base,
					    __heap_post_release, mapaddr);
	else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
		err = xnheap_destroy(&heap->heap_base, &__heap_flush_private, NULL);

	xnlock_get_irqsave(&nklock, s);

	if (err)
		heap->magic = XENO_HEAP_MAGIC;
	else if (!(heap->mode & H_MAPPABLE))
		__heap_post_release(&heap->heap_base);

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
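
Applications do not call rt_heap_delete_inner() directly; here is a hedged usage sketch, assuming the public rt_heap_delete() wrapper forwards here with a NULL map address (demo_heap is a hypothetical descriptor created elsewhere):

#include <native/heap.h>

static RT_HEAP demo_heap;	/* hypothetical, created elsewhere */

int demo_cleanup(void)
{
	/* Must run from a Linux task context (root thread); otherwise
	   -EPERM is returned, as the check above shows. */
	int err = rt_heap_delete(&demo_heap);
	if (err)
		/* e.g. -EINVAL/-EIDRM for a stale or deleted descriptor. */
		return err;

	return 0;
}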
Code Example #3
File: heap.c  Project: BhargavKola/xenomai-forge
int rt_heap_delete_inner(RT_HEAP *heap, void __user *mapaddr)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	xeno_mark_deleted(heap);

	/* Get out of the nklocked section before releasing the heap
	   memory, since we are about to invoke Linux kernel
	   services. */

	xnlock_put_irqrestore(&nklock, s);

	/*
	 * The heap descriptor has been marked as deleted before we
	 * released the superlock, thus preventing any subsequent call
	 * to rt_heap_delete() from succeeding, so now we can actually
	 * destroy it safely.
	 */

#ifndef __XENO_SIM__
	if (heap->mode & H_MAPPABLE)
		xnheap_destroy_mapped(&heap->heap_base,
				      __heap_post_release, mapaddr);
	else
#endif
	{
		xnheap_destroy(&heap->heap_base, &__heap_flush_private, NULL);
		__heap_post_release(&heap->heap_base);
	}

	return 0;
}
Code Example #4
/**
 * Close a file descriptor.
 *
 * This service closes the file descriptor @a fd. In kernel-space, this service
 * only works for file descriptors opened with shm_open(), i.e. shared memory
 * objects. A shared memory object is only destroyed once all file descriptors
 * are closed with this service, the object is unlinked with the shm_unlink()
 * service, and all mappings are unmapped with the munmap() service.
 *
 * @param fd file descriptor.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EBADF, @a fd is not a valid file descriptor (in kernel-space, it was not
 *   obtained with shm_open());
 * - EPERM, the caller context is invalid.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - kernel-space cancellation cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode);
 * - user-space cancellation cleanup routine.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/close.html">
 * Specification.</a>
 * 
 */
int close(int fd)
{
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	spl_t s;
	int err;

	xnlock_get_irqsave(&nklock, s);

	shm = pse51_shm_get(&desc, fd, 0);

	if (IS_ERR(shm)) {
		err = -PTR_ERR(shm);
		goto err_put;
	}

	if (xnpod_interrupt_p() || !xnpod_root_p()) {
		err = EPERM;
		goto err_put;
	}

	pse51_shm_put(shm, 1);
	xnlock_put_irqrestore(&nklock, s);

	err = pse51_desc_destroy(desc);
	if (err)
		goto error;

	return 0;

  err_put:
	xnlock_put_irqrestore(&nklock, s);
  error:
	thread_set_errno(err);
	return -1;
}
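
Because destruction is deferred, closing the descriptor does not tear down existing mappings. A hedged sketch of the resulting teardown order ("/demo-shm" and the sizes are hypothetical; error checking omitted for brevity):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

void demo_close_order(void)
{
	int fd = shm_open("/demo-shm", O_CREAT | O_RDWR, 0666);
	ftruncate(fd, 4096);
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	close(fd);		/* descriptor gone, object still alive */
	p[0] = 42;		/* the mapping remains usable */

	munmap(p, 4096);		/* last unmap... */
	shm_unlink("/demo-shm");	/* ...plus unlink destroy the object */
}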
Code Example #5
/**
 * Unmap pages of memory.
 *
 * This service unmaps the shared memory region [addr;addr+len) from the caller
 * address-space.
 *
 * When called from kernel-space, the memory region remains accessible as long
 * as it exists, and this service only decrements a reference counter.
 *
 * When called from user-space, if the region is not a shared memory region,
 * this service falls back to the regular Linux munmap() service.
 *
 * @param addr start address of shared memory area;
 *
 * @param len length of the shared memory area.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, @a len is null, @a addr is not a multiple of the page size or the
 *   range [addr;addr+len) is not a mapped region;
 * - ENXIO, @a addr is not the address of a shared memory area;
 * - EPERM, the caller context is invalid;
 * - EINTR, this service was interrupted by a signal.
 * 
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - kernel-space cancellation cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode);
 * - user-space cancellation cleanup routine.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/munmap.html">
 * Specification.</a>
 * 
 */
int munmap(void *addr, size_t len)
{
	pse51_shm_map_t *mapping = NULL;
	xnholder_t *holder;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	if (!len) {
		err = EINVAL;
		goto error;
	}

	if (((unsigned long)addr) % PAGE_SIZE) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_lookup(addr);

	if (!shm) {
		xnlock_put_irqrestore(&nklock, s);
		err = ENXIO;
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		xnlock_put_irqrestore(&nklock, s);
		err = EPERM;
		goto error;
	}

	++shm->nodebase.refcount;
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	for (holder = getheadq(&shm->mappings);
	     holder; holder = nextq(&shm->mappings, holder)) {
		mapping = link2map(holder);

		if (mapping->addr == addr && mapping->size == len)
			break;
	}

	if (!holder) {
		xnlock_put_irqrestore(&nklock, s);
		err = EINVAL;
		goto err_up;
	}

	removeq(&shm->mappings, holder);
	up(&shm->maplock);

	xnfree(mapping);
	pse51_shm_put(shm, 2);
	return 0;

      err_up:
	up(&shm->maplock);
      err_shm_put:
	pse51_shm_put(shm, 1);
      error:
	thread_set_errno(err);
	return -1;
}
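
Note that the lookup above requires an exact match on both the address and the length recorded at mmap() time, so partial unmapping is rejected in this kernel-space implementation. A hedged sketch (p and len are assumed to come from a prior mmap() on a shared memory descriptor):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

void demo_unmap(void *p, size_t len)
{
	/* Unmapping only half the region matches no recorded mapping,
	   so this fails with EINVAL under the semantics above. */
	if (munmap(p, len / 2) < 0 && errno == EINVAL)
		fprintf(stderr, "partial unmap rejected\n");

	if (munmap(p, len) < 0)	/* exact (addr, len) pair: succeeds */
		perror("munmap");
}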
Code Example #6
/**
 * Map pages of memory.
 *
 * This service allows shared memory regions to be accessed by the caller.
 *
 * When used in kernel-space, this service returns the address at offset @a
 * off within the shared memory object underlying @a fd. The protection flags @a
 * prot, are only checked for consistency with @a fd open flags, but memory
 * protection is unsupported. Since the shared memory region exists before it
 * is mapped, this service only increments a reference counter.
 *
 * The only supported value for @a flags is @a MAP_SHARED.
 *
 * When used in user-space, this service maps the specified shared memory region
 * into the caller address-space. If @a fd is not a shared memory object
 * descriptor (i.e. not obtained with shm_open()), this service falls back to
 * the regular Linux mmap service.
 *
 * @param addr ignored.
 *
 * @param len size of the shared memory region to be mapped.
 *
 * @param prot protection bits, checked for consistency in kernel-space but
 * only enforced in user-space; a bitwise OR of the following values:
 * - PROT_NONE, meaning that the mapped region can not be accessed;
 * - PROT_READ, meaning that the mapped region can be read;
 * - PROT_WRITE, meaning that the mapped region can be written;
 * - PROT_EXEC, meaning that the mapped region can be executed.
 *
 * @param flags only MAP_SHARED is accepted, meaning that the mapped memory
 * region is shared.
 *
 * @param fd file descriptor, obtained with shm_open().
 *
 * @param off offset in the shared memory region.
 *
 * @retval the address of the mapped region on success;
 * @retval MAP_FAILED with @a errno set if:
 * - EINVAL, @a len is null or @a addr is not a multiple of @a PAGE_SIZE;
 * - EBADF, @a fd is not a shared memory object descriptor (obtained with
 *   shm_open());
 * - EPERM, the caller context is invalid;
 * - ENOTSUP, @a flags is not @a MAP_SHARED;
 * - EACCES, @a fd is not opened for reading, or is not opened for writing and
 *   PROT_WRITE is set in @a prot;
 * - EINTR, this service was interrupted by a signal;
 * - ENXIO, the range [off;off+len) is invalid for the shared memory region
 *   specified by @a fd;
 * - EAGAIN, insufficient memory exists in the system heap to create the
 *   mapping, increase CONFIG_XENO_OPT_SYS_HEAPSZ.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mmap.html">
 * Specification.</a>
 * 
 */
void *mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off)
{
	pse51_shm_map_t *map;
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	void *result;
	int err;
	spl_t s;

	if (!len) {
		err = EINVAL;
		goto error;
	}

	if (((unsigned long)addr) % PAGE_SIZE) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);

	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		xnlock_put_irqrestore(&nklock, s);
		err = -PTR_ERR(shm);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (flags != MAP_SHARED) {
		err = ENOTSUP;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	desc_flags = pse51_desc_getflags(desc) & PSE51_PERMS_MASK;
	xnlock_put_irqrestore(&nklock, s);

	if ((desc_flags != O_RDWR && desc_flags != O_RDONLY) ||
	    ((prot & PROT_WRITE) && desc_flags == O_RDONLY)) {
		err = EACCES;
		goto err_shm_put;
	}

	map = (pse51_shm_map_t *) xnmalloc(sizeof(*map));
	if (!map) {
		err = EAGAIN;
		goto err_shm_put;
	}

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_free_map;
	}

	if (!shm->addr || off + len > shm->size) {
		err = ENXIO;
		up(&shm->maplock);
		goto err_free_map;
	}

	/* Align the heap address on a page boundary. */
	result = (void *)PAGE_ALIGN((u_long)shm->addr);
	map->addr = result = (void *)((char *)result + off);
	map->size = len;
	inith(&map->link);
	prependq(&shm->mappings, &map->link);
	up(&shm->maplock);

	return result;

  err_free_map:
	xnfree(map);
  err_shm_put:
	pse51_shm_put(shm, 1);
  error:
	thread_set_errno(err);
	return MAP_FAILED;
}
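
A hedged sketch of the EACCES consistency check above ("/demo-shm" is hypothetical and assumed to already exist with a non-zero size):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

void demo_prot(void)
{
	int fd = shm_open("/demo-shm", O_RDONLY, 0);
	if (fd < 0)
		return;

	/* PROT_WRITE conflicts with O_RDONLY: fails with EACCES. */
	void *bad = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (bad == MAP_FAILED)
		perror("mmap(PROT_WRITE) on O_RDONLY fd");

	/* PROT_READ alone is consistent with the open flags. */
	void *ok = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (ok != MAP_FAILED)
		munmap(ok, 4096);

	close(fd);
}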
Code Example #7
/**
 * Truncate a file or shared memory object to a specified length.
 *
 * When used in kernel-space, this service sets the size of a shared memory
 * object opened with the shm_open() service to @a len. In user-space, this
 * service falls back to the regular Linux ftruncate service for descriptors not
 * obtained with shm_open(). When this service is used to increase the size of a
 * shared memory object, the added space is zero-filled.
 *
 * Shared memory objects are suitable for direct memory access (allocated in
 * physically contiguous memory) if O_DIRECT was passed to shm_open().
 *
 * Shared memory objects may only be resized if they are not currently mapped.
 *
 * @param fd file descriptor;
 *
 * @param len new length of the underlying file or shared memory object.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EBADF, @a fd is not a valid file descriptor;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the specified length is invalid;
 * - EINVAL, the architecture can not honour the O_DIRECT flag;
 * - EINTR, this service was interrupted by a signal;
 * - EBUSY, @a fd is a shared memory object descriptor and the underlying shared
 *   memory is currently mapped;
 * - EFBIG, allocation of system memory failed.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/ftruncate.html">
 * Specification.</a>
 * 
 */
int ftruncate(int fd, off_t len)
{
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		err = -PTR_ERR(shm);
		xnlock_put_irqrestore(&nklock, s);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (len < 0) {
		err = EINVAL;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	desc_flags = pse51_desc_getflags(desc);
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	/* Allocate one more page for alignment (the address returned by mmap
	   must be aligned on a page boundary). */
	if (len)
#ifdef CONFIG_XENO_OPT_PERVASIVE
		len = xnheap_rounded_size(len + PAGE_SIZE, PAGE_SIZE);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		len = xnheap_rounded_size(len + PAGE_SIZE, XNHEAP_PAGE_SIZE);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

	err = 0;
	if (emptyq_p(&shm->mappings)) {
		/* Temporary storage, in order to preserve the memory contents upon
		   resizing, if possible. */
		void *addr = NULL;
		size_t size = 0;

		if (shm->addr) {
			if (len == xnheap_extentsize(&shm->heapbase)) {
				/* Size unchanged, skip copy and reinit. */
				err = 0;
				goto err_up;
			}

			size = xnheap_max_contiguous(&shm->heapbase);
			addr = xnarch_alloc_host_mem(size);
			if (!addr) {
				err = ENOMEM;
				goto err_up;
			}

			memcpy(addr, shm->addr, size);

			xnheap_free(&shm->heapbase, shm->addr);
			xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);

			shm->addr = NULL;
			shm->size = 0;
		}

		if (len) {
			int flags = XNARCH_SHARED_HEAP_FLAGS |
				((desc_flags & O_DIRECT) ? GFP_DMA : 0);

			err = -xnheap_init_mapped(&shm->heapbase, len, flags);
			if (err)
				goto err_up;

			xnheap_set_label(&shm->heapbase,
					 "posix shm: %s", shm->nodebase.name);

			shm->size = xnheap_max_contiguous(&shm->heapbase);
			shm->addr = xnheap_alloc(&shm->heapbase, shm->size);
			/* Required. */
			memset(shm->addr, '\0', shm->size);

			/* Copy the previous contents. */
			if (addr)
				memcpy(shm->addr, addr,
				       shm->size < size ? shm->size : size);

			shm->size -= PAGE_SIZE;
		}

		if (addr)
			xnarch_free_host_mem(addr, size);
	} else if (len != xnheap_extentsize(&shm->heapbase))
		err = EBUSY;

      err_up:
	up(&shm->maplock);

      err_shm_put:
	pse51_shm_put(shm, 1);

	if (!err)
		return 0;

      error:
	thread_set_errno(err == ENOMEM ? EFBIG : err);
	return -1;
}
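
A hedged sketch of the resizing rules above: a freshly created object must be dimensioned before mapping, and changing the size while mapped fails ("/demo-shm" and 16384 are hypothetical; error checking abbreviated):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int demo_size(void)
{
	int fd = shm_open("/demo-shm", O_CREAT | O_RDWR, 0666);
	if (fd < 0)
		return -1;

	if (ftruncate(fd, 16384) < 0) {	/* allocate and zero-fill */
		close(fd);
		return -1;
	}

	void *p = mmap(NULL, 16384, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	/* While the region is mapped, ftruncate(fd, 32768) would fail
	   with EBUSY, per the emptyq_p(&shm->mappings) check above. */

	if (p != MAP_FAILED)
		munmap(p, 16384);
	close(fd);
	return 0;
}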
Code Example #8
/**
 * Open a shared memory object.
 *
 * This service establishes a connection between a shared memory object and a
 * file descriptor. Further use of this descriptor allows dimensioning the
 * shared memory object and mapping it into the caller's address space.
 *
 * One of the following access modes should be set in @a oflags:
 * - O_RDONLY, meaning that the shared memory object may only be mapped with the
 *   PROT_READ flag;
 * - O_WRONLY, meaning that the shared memory object may only be mapped with the
 *   PROT_WRITE flag;
 * - O_RDWR, meaning that the shared memory object may be mapped with the
 *   PROT_READ | PROT_WRITE flag.
 *
 * If no shared memory object named @a name exists, and @a oflags has the @a
 * O_CREAT bit set, the shared memory object is created by this function.
 *
 * If @a oflags has the two bits @a O_CREAT and @a O_EXCL set and the shared
 * memory object already exists, this service fails.
 *
 * If @a oflags has the bit @a O_TRUNC set and the shared memory object exists
 * and is not currently mapped, its size is truncated to 0.
 *
 * If @a oflags has the bit @a O_DIRECT set, the shared memory will be suitable
 * for direct memory access (allocated in physically contiguous memory).
 *
 * @a name may be any arbitrary string, in which slashes have no particular
 * meaning. However, for portability, using a name which starts with a slash and
 * contains no other slash is recommended.
 *
 * @param name name of the shared memory object to open;
 *
 * @param oflags flags.
 *
 * @param mode ignored.
 *
 * @return a file descriptor on success;
 * @return -1 with @a errno set if:
 * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
 * - EEXIST, the bits @a O_CREAT and @a O_EXCL were set in @a oflags and the
 *   shared memory object already exists;
 * - ENOENT, the bit @a O_CREAT is not set in @a oflags and the shared memory
 *   object does not exist;
 * - ENOSPC, insufficient memory exists in the system heap to create the shared
 *   memory object, increase CONFIG_XENO_OPT_SYS_HEAPSZ;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the O_TRUNC flag was specified and the shared memory object is
 *   currently mapped;
 * - EMFILE, too many descriptors are currently open.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/shm_open.html">
 * Specification.</a>
 * 
 */
int shm_open(const char *name, int oflags, mode_t mode)
{
	pse51_node_t *node;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	int err, fd;
	spl_t s;

	/* From root context only. */
	if (xnpod_asynch_p() || !xnpod_root_p()) {
		thread_set_errno(EPERM);
		return -1;
	}

	xnlock_get_irqsave(&nklock, s);
	err = pse51_node_get(&node, name, PSE51_SHM_MAGIC, oflags);
	xnlock_put_irqrestore(&nklock, s);
	if (err)
		goto error;

	if (node) {
		shm = node2shm(node);
		goto got_shm;
	}

	/* We must create the shared memory object, not yet allocated. */
	shm = (pse51_shm_t *) xnmalloc(sizeof(*shm));
	if (!shm) {
		err = ENOSPC;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	err = pse51_node_add(&shm->nodebase, name, PSE51_SHM_MAGIC);
	if (err && err != EEXIST)
		goto err_unlock;

	if (err == EEXIST) {
		/* the same shm was created in the meantime, roll back. */
		err = pse51_node_get(&node, name, PSE51_SHM_MAGIC, oflags);
	  err_unlock:
		xnlock_put_irqrestore(&nklock, s);
		xnfree(shm);
		if (err)
			goto error;

		shm = node2shm(node);
		goto got_shm;
	}

	pse51_shm_init(shm);
	xnlock_put_irqrestore(&nklock, s);

  got_shm:
	err = pse51_desc_create(&desc, &shm->nodebase,
				oflags & (PSE51_PERMS_MASK | O_DIRECT));
	if (err)
		goto err_shm_put;

	fd = pse51_desc_fd(desc);

	if ((oflags & O_TRUNC) && ftruncate(fd, 0)) {
		close(fd);
		return -1;
	}

	return fd;

  err_shm_put:
	pse51_shm_put(shm, 1);
  error:
	thread_set_errno(err);
	return -1;
}
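
A hedged sketch of exclusive creation with O_CREAT | O_EXCL ("/demo-shm" is a hypothetical name):

#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>

int demo_open(void)
{
	/* Create the object, failing with EEXIST if it already exists. */
	int fd = shm_open("/demo-shm", O_CREAT | O_EXCL | O_RDWR, 0666);
	if (fd < 0 && errno == EEXIST)
		/* Another thread created it first: open the existing one. */
		fd = shm_open("/demo-shm", O_RDWR, 0);

	return fd;	/* -1 with errno set on failure */
}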
Code Example #9
File: heap.c  Project: BhargavKola/xenomai-forge
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_DMA32) ? GFP_DMA32 : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNHEAP_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNHEAP_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}
	xnheap_set_label(&heap->heap_base, "rt_heap: %s", name);

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO), NULL);
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(heap->name, heap, &heap->handle,
				       &__heap_pnode.node);

		if (err)
			rt_heap_delete(heap);
	}

	return err;
}
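
A hedged usage sketch of the full native-heap life cycle built on the routine above (the name, sizes, and the H_PRIO/TM_NONBLOCK choices are illustrative; assumes the native skin's rt_heap_alloc()/rt_heap_free() services):

#include <native/heap.h>

static RT_HEAP demo_heap;

int demo_heap_usage(void)
{
	void *block;
	int err;

	/* 8 KiB private (non-mappable) heap, priority-ordered waiters. */
	err = rt_heap_create(&demo_heap, "demo", 8192, H_PRIO);
	if (err)
		return err;

	/* TM_NONBLOCK: fail immediately rather than wait for memory. */
	err = rt_heap_alloc(&demo_heap, 1024, TM_NONBLOCK, &block);
	if (!err)
		rt_heap_free(&demo_heap, block);

	return rt_heap_delete(&demo_heap);
}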
Code Example #10
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

#ifdef CONFIG_XENO_OPT_PERVASIVE
		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		return -ENOSYS;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNCORE_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO));
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregistry_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__heap_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on entry)
			   from user-space, it gets registered under a unique
			   internal name but is not exported through /proc. */
			xnobject_create_name(heap->name, sizeof(heap->name),
					     (void *)heap);
			pnode = NULL;
		}

		err = xnregistry_enter(heap->name, heap, &heap->handle, pnode);

		if (err)
			rt_heap_delete(heap);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}