Example #1
static void *rtdm_skin_callback(int event, void *data)
{
	struct rtdm_process *process;

	switch (event) {
	case XNSHADOW_CLIENT_ATTACH:
		process = xnarch_alloc_host_mem(sizeof(*process));
		if (!process)
			return ERR_PTR(-ENOSPC);

#ifdef CONFIG_XENO_OPT_VFILE
		memcpy(process->name, current->comm, sizeof(process->name));
		process->pid = current->pid;
#endif /* CONFIG_XENO_OPT_VFILE */

		return &process->ppd;

	case XNSHADOW_CLIENT_DETACH:
		process = container_of((xnshadow_ppd_t *) data,
				       struct rtdm_process, ppd);

		cleanup_owned_contexts(process);

		xnarch_free_host_mem(process, sizeof(*process));

		break;
	}
	return NULL;
}
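On attach, the callback returns the address of an embedded xnshadow_ppd_t, which the detach path maps back with container_of(). A minimal sketch of the per-process descriptor this assumes follows; the field sizes are illustrative, and only the embedded ppd member is actually implied by the code:

/* Hedged sketch of the descriptor assumed by rtdm_skin_callback(). */
struct rtdm_process {
#ifdef CONFIG_XENO_OPT_VFILE
	char name[TASK_COMM_LEN];	/* filled from current->comm; must not
					   exceed sizeof(current->comm), given
					   the memcpy() above */
	pid_t pid;			/* filled from current->pid */
#endif /* CONFIG_XENO_OPT_VFILE */
	xnshadow_ppd_t ppd;		/* returned on ATTACH, recovered on
					   DETACH via container_of() */
};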
Example #2
static void *ui_shadow_eventcb(int event, void *data)
{
	struct ui_resource_holder *rh;

	switch (event) {

	case XNSHADOW_CLIENT_ATTACH:

		rh = xnarch_alloc_host_mem(sizeof(*rh));
		if (!rh)
			return ERR_PTR(-ENOMEM);

		initq(&rh->semq);
		initq(&rh->flgq);
		initq(&rh->mbxq);

		return &rh->ppd;

	case XNSHADOW_CLIENT_DETACH:

		rh = ppd2rholder((xnshadow_ppd_t *) data);
		ui_sem_flush_rq(&rh->semq);
		ui_flag_flush_rq(&rh->flgq);
		ui_mbx_flush_rq(&rh->mbxq);

		xnarch_free_host_mem(rh, sizeof(*rh));

		return NULL;
	}

	return ERR_PTR(-EINVAL);
}
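The ppd2rholder() helper used on detach is presumably the inverse of returning &rh->ppd on attach, i.e. a container_of() wrapper. A hedged sketch, assuming the holder embeds the generic descriptor as a member named ppd:

/* Hedged sketch of ppd2rholder(): map the generic per-process
 * descriptor back to the uITRON resource holder embedding it. */
static inline struct ui_resource_holder *ppd2rholder(xnshadow_ppd_t *ppd)
{
	return container_of(ppd, struct ui_resource_holder, ppd);
}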
Example #3
int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
{
	int ret = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	bf->bufmem = xnarch_alloc_host_mem(bufsz);
	if (bf->bufmem == NULL)
		return -ENOMEM;

	xnsynch_init(&bf->isynch_base, mode & B_PRIO, NULL);
	xnsynch_init(&bf->osynch_base, mode & B_PRIO, NULL);

	bf->handle = 0;	/* i.e. (still) unregistered buffer. */
	xnobject_copy_name(bf->name, name);
	inith(&bf->rlink);
	bf->rqueue = &xeno_get_rholder()->bufferq;
	xnlock_get_irqsave(&nklock, s);
	appendq(bf->rqueue, &bf->rlink);
	xnlock_put_irqrestore(&nklock, s);

	bf->mode = mode;
	bf->bufsz = bufsz;
	bf->rdoff = 0;
	bf->wroff = 0;
	bf->fillsz = 0;
	bf->rdtoken = 0;
	bf->wrtoken = 0;

#ifndef __XENO_SIM__
	bf->cpid = 0;
#endif
	bf->magic = XENO_BUFFER_MAGIC;

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		ret = xnregistry_enter(bf->name, bf, &bf->handle,
				       &__buffer_pnode.node);

		if (ret)
			rt_buffer_delete(bf);
	}

	return ret;
}
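A hedged usage sketch of the service above: create a 4 KB buffer with priority-ordered waiters from a kernel context, then push a short message without blocking. rt_buffer_write() and TM_NONBLOCK belong to the same native API; the buffer name is illustrative.

#include <native/buffer.h>

static RT_BUFFER bf;

int buffer_example(void)
{
	ssize_t n;
	int ret = rt_buffer_create(&bf, "example-buffer", 4096, B_PRIO);
	if (ret)
		return ret;

	/* Non-blocking write; returns the number of bytes copied,
	   or a negative error code. */
	n = rt_buffer_write(&bf, "hello", 5, TM_NONBLOCK);
	return n < 0 ? (int)n : 0;
}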
Example #4
int xntbase_alloc(const char *name, u_long period, u_long flags,
		  xntbase_t **basep)
{
	xntslave_t *slave;
	xntbase_t *base;
	spl_t s;

	if (flags & ~XNTBISO)
		return -EINVAL;

	if (period == XN_APERIODIC_TICK) {
		*basep = &nktbase;
		xnarch_declare_tbase(&nktbase);
		return 0;
	}

	slave = (xntslave_t *)xnarch_alloc_host_mem(sizeof(*slave));

	if (!slave)
		return -ENOMEM;

	base = &slave->base;
	base->tickvalue = period;
	base->ticks2sec = 1000000000UL / period;
	base->wallclock_offset = 0;
	base->jiffies = 0;
	base->hook = NULL;
	base->ops = &nktimer_ops_periodic;
	base->name = name;
	inith(&base->link);
	xntslave_init(slave);

	/* Set initial status:
	   Not running, no time set, unlocked, isolated if requested. */
	base->status = flags;

	*basep = base;
#ifdef CONFIG_XENO_OPT_STATS
	initq(&base->timerq);
#endif /* CONFIG_XENO_OPT_STATS */
	xntbase_declare_proc(base);
	xnlock_get_irqsave(&nklock, s);
	appendq(&nktimebaseq, &base->link);
	xnlock_put_irqrestore(&nklock, s);

	xnarch_declare_tbase(base);

	return 0;
}
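Since ticks2sec is computed as 1000000000UL / period, the period argument is expressed in nanoseconds. A hedged usage sketch: allocate a periodic time base with a 1 ms tick, assuming xntbase_free() is the matching release call.

#include <nucleus/timebase.h>

int tbase_example(void)
{
	xntbase_t *tbase;
	int ret;

	ret = xntbase_alloc("example-skin", 1000000, 0, &tbase); /* 1 ms tick */
	if (ret)
		return ret;
	/* ... use the time base ... */
	xntbase_free(tbase);
	return 0;
}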
Example #5
static void *__wind_shadow_eventcb(int event, void *data)
{
	struct wind_resource_holder *rh;
	switch (event) {

	case XNSHADOW_CLIENT_ATTACH:

		rh = (struct wind_resource_holder *)
		    xnarch_alloc_host_mem(sizeof(*rh));
		if (!rh)
			return ERR_PTR(-ENOMEM);

		initq(&rh->wdq);
		/* A single server thread pends on this. */
		xnsynch_init(&rh->wdsynch, XNSYNCH_FIFO, NULL);
		initq(&rh->wdpending);
		rh->wdcount = 0;
		initq(&rh->msgQq);
		initq(&rh->semq);

		return &rh->ppd;

	case XNSHADOW_CLIENT_DETACH:

		rh = ppd2rholder((xnshadow_ppd_t *) data);
		wind_wd_flush_rq(&rh->wdq);
		xnsynch_destroy(&rh->wdsynch);
		/* No need to reschedule: all our threads have been zapped. */
		wind_msgq_flush_rq(&rh->msgQq);
		wind_sem_flush_rq(&rh->semq);

		xnarch_free_host_mem(rh, sizeof(*rh));

		return NULL;
	}

	return ERR_PTR(-EINVAL);
}
Example #6
/**
 * Truncate a file or shared memory object to a specified length.
 *
 * When used in kernel-space, this service sets the size of a shared
 * memory object opened with the shm_open() service to @a len. In
 * user-space, this service falls back to the regular Linux ftruncate()
 * service for file descriptors not obtained with shm_open(). When this
 * service is used to increase the size of a shared memory object, the
 * added space is zero-filled.
 *
 * Shared memory objects are suitable for direct memory access
 * (allocated in physically contiguous memory) if O_DIRECT was passed to
 * shm_open().
 *
 * Shared memory objects may only be resized if they are not currently mapped.
 *
 * @param fd file descriptor;
 *
 * @param len new length of the underlying file or shared memory object.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EBADF, @a fd is not a valid file descriptor;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the specified length is invalid;
 * - EINVAL, the architecture cannot honour the O_DIRECT flag;
 * - EINTR, this service was interrupted by a signal;
 * - EBUSY, @a fd is a shared memory object descriptor and the underlying shared
 *   memory is currently mapped;
 * - EFBIG, allocation of system memory failed.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/ftruncate.html">
 * Specification.</a>
 * 
 */
int ftruncate(int fd, off_t len)
{
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		err = -PTR_ERR(shm);
		xnlock_put_irqrestore(&nklock, s);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (len < 0) {
		err = EINVAL;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	desc_flags = pse51_desc_getflags(desc);
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	/* Allocate one more page for alignment (the address returned by mmap
	   must be aligned on a page boundary). */
	if (len)
#ifdef CONFIG_XENO_OPT_PERVASIVE
		len = xnheap_rounded_size(len + PAGE_SIZE, PAGE_SIZE);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		len = xnheap_rounded_size(len + PAGE_SIZE, XNHEAP_PAGE_SIZE);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

	err = 0;
	if (emptyq_p(&shm->mappings)) {
		/* Temporary storage, in order to preserve the memory contents upon
		   resizing, if possible. */
		void *addr = NULL;
		size_t size = 0;

		if (shm->addr) {
			if (len == xnheap_extentsize(&shm->heapbase)) {
				/* Size unchanged, skip copy and reinit. */
				err = 0;
				goto err_up;
			}

			size = xnheap_max_contiguous(&shm->heapbase);
			addr = xnarch_alloc_host_mem(size);
			if (!addr) {
				err = ENOMEM;
				goto err_up;
			}

			memcpy(addr, shm->addr, size);

			xnheap_free(&shm->heapbase, shm->addr);
			xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);

			shm->addr = NULL;
			shm->size = 0;
		}

		if (len) {
			int flags = XNARCH_SHARED_HEAP_FLAGS |
				((desc_flags & O_DIRECT) ? GFP_DMA : 0);

			err = -xnheap_init_mapped(&shm->heapbase, len, flags);
			if (err)
				goto err_up;

			xnheap_set_label(&shm->heapbase,
					 "posix shm: %s", shm->nodebase.name);

			shm->size = xnheap_max_contiguous(&shm->heapbase);
			shm->addr = xnheap_alloc(&shm->heapbase, shm->size);
			/* Required. */
			memset(shm->addr, '\0', shm->size);

			/* Copy the previous contents. */
			if (addr)
				memcpy(shm->addr, addr,
				       shm->size < size ? shm->size : size);

			shm->size -= PAGE_SIZE;
		}

		if (addr)
			xnarch_free_host_mem(addr, size);
	} else if (len != xnheap_extentsize(&shm->heapbase))
		err = EBUSY;

      err_up:
	up(&shm->maplock);

      err_shm_put:
	pse51_shm_put(shm, 1);

	if (!err)
		return 0;

      error:
	thread_set_errno(err == ENOMEM ? EFBIG : err);
	return -1;
}
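A hedged user-space usage sketch of the documented semantics: size the object with ftruncate() before mapping it, since resizing a currently mapped object fails with EBUSY.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int shm_example(void)
{
	void *p;
	int fd = shm_open("/example-shm", O_RDWR | O_CREAT, 0666);

	if (fd < 0)
		return -1;
	if (ftruncate(fd, 8192) < 0)	/* size it while still unmapped */
		return -1;
	p = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return p == MAP_FAILED ? -1 : 0;
}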
Example #7
static xnshm_a_t *create_new_heap(unsigned long name, int heapsize, int suprt)
{
	xnshm_a_t *p;
	int err;

	p = xnheap_alloc(&kheap, sizeof(xnshm_a_t));
	if (!p)
		return NULL;

	p->heap = xnheap_alloc(&kheap, sizeof(xnheap_t));
	if (!p->heap) {
		xnheap_free(&kheap, p);
		return NULL;
	}

	/*
	 * Account for the minimum heap size and overhead so that the
	 * actual free space is large enough to match the requested
	 * size.
	 */

#ifdef CONFIG_XENO_OPT_PERVASIVE
	heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

	err = xnheap_init_mapped(p->heap,
				 heapsize,
				 suprt == USE_GFP_KERNEL ? GFP_KERNEL : 0);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem) {
			err = -ENOMEM;
		} else {

			err = xnheap_init(p->heap, heapmem, heapsize, XNCORE_PAGE_SIZE);
			if (err) {
				xnarch_free_host_mem(heapmem, heapsize);
			}
		}
	}
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
	if (err) {
		xnheap_free(&kheap, p->heap);
		xnheap_free(&kheap, p);
		return NULL;
	}

	p->chunk = xnheap_mapped_address(p->heap, 0);

	memset(p->chunk, 0, heapsize);

	inith(&p->link);
	p->ref = 1;
	p->name = name;
	p->size = heapsize;

	return p;
}
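A hedged usage sketch, in the same translation unit as the static helper above: create a 16 KB shared heap keyed by a numeric name and backed by GFP_KERNEL memory. The name value is illustrative; USE_GFP_KERNEL is the support flag tested in the source.

static int heap_setup(void)
{
	/* 0x53484d31 spells "SHM1"; any unique key would do. */
	xnshm_a_t *p = create_new_heap(0x53484d31UL, 16 * 1024, USE_GFP_KERNEL);

	return p ? 0 : -ENOMEM;
}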
Example #8
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_DMA32) ? GFP_DMA32 : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNHEAP_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNHEAP_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}
	xnheap_set_label(&heap->heap_base, "rt_heap: %s", name);

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO), NULL);
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(heap->name, heap, &heap->handle,
				       &__heap_pnode.node);

		if (err)
			rt_heap_delete(heap);
	}

	return err;
}
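A hedged usage sketch pairing rt_heap_create() with rt_heap_alloc() from the same native API; the sizes and name are illustrative.

#include <native/heap.h>

static RT_HEAP heap;

int heap_example(void)
{
	void *block;
	int ret = rt_heap_create(&heap, "example-heap", 256 * 1024, H_PRIO);

	if (ret)
		return ret;

	/* Non-blocking allocation of a 1 KB block from the new heap. */
	return rt_heap_alloc(&heap, 1024, TM_NONBLOCK, &block);
}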
Example #9
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

#ifdef CONFIG_XENO_OPT_PERVASIVE
		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		return -ENOSYS;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNCORE_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO));
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregister_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__heap_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on
			   entry) from user-space, it gets registered under a
			   unique internal name but is not exported through
			   /proc. */
			xnobject_create_name(heap->name, sizeof(heap->name),
					     (void *)heap);
			pnode = NULL;
		}

		err = xnregistry_enter(heap->name, heap, &heap->handle, pnode);

		if (err)
			rt_heap_delete(heap);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}
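This older variant differs from Example #8 mainly in the CONFIG_XENO_OPT_REGISTRY conditional: a non-NULL but empty name yields an anonymous object, registered under a generated internal name and not exported through /proc. A hedged sketch of that path, using a non-mappable mode since H_MAPPABLE rejects empty names above:

static RT_HEAP anon_heap;

static int anon_example(void)
{
	/* Empty (non-NULL) name: anonymous object, registered under a
	   generated internal name, not exported through /proc. */
	return rt_heap_create(&anon_heap, "", 64 * 1024, H_PRIO);
}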