static void *rtdm_skin_callback(int event, void *data)
{
        struct rtdm_process *process;

        switch (event) {
        case XNSHADOW_CLIENT_ATTACH:
                process = xnarch_alloc_host_mem(sizeof(*process));
                if (!process)
                        return ERR_PTR(-ENOSPC);

#ifdef CONFIG_XENO_OPT_VFILE
                memcpy(process->name, current->comm, sizeof(process->name));
                process->pid = current->pid;
#endif /* CONFIG_XENO_OPT_VFILE */

                return &process->ppd;

        case XNSHADOW_CLIENT_DETACH:
                process = container_of((xnshadow_ppd_t *)data,
                                       struct rtdm_process, ppd);
                cleanup_owned_contexts(process);
                xnarch_free_host_mem(process, sizeof(*process));
                break;
        }

        return NULL;
}
static void *ui_shadow_eventcb(int event, void *data)
{
        struct ui_resource_holder *rh;

        switch (event) {
        case XNSHADOW_CLIENT_ATTACH:
                rh = xnarch_alloc_host_mem(sizeof(*rh));
                if (!rh)
                        return ERR_PTR(-ENOMEM);

                initq(&rh->semq);
                initq(&rh->flgq);
                initq(&rh->mbxq);

                return &rh->ppd;

        case XNSHADOW_CLIENT_DETACH:
                rh = ppd2rholder((xnshadow_ppd_t *)data);
                ui_sem_flush_rq(&rh->semq);
                ui_flag_flush_rq(&rh->flgq);
                ui_mbx_flush_rq(&rh->mbxq);

                xnarch_free_host_mem(rh, sizeof(*rh));

                return NULL;
        }

        return ERR_PTR(-EINVAL);
}
void xntbase_free(xntbase_t *base)
{
        spl_t s;

        if (base == &nktbase)
                return;

        xntslave_destroy(base2slave(base));
        xntbase_remove_proc(base);

        xnlock_get_irqsave(&nklock, s);
        removeq(&nktimebaseq, &base->link);
        xnlock_put_irqrestore(&nklock, s);

        xnarch_free_host_mem(base, sizeof(*base));
}
int rt_buffer_delete(RT_BUFFER *bf)
{
        int ret = 0, resched;
        spl_t s;

        if (xnpod_asynch_p())
                return -EPERM;

        xnlock_get_irqsave(&nklock, s);

        bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
        if (bf == NULL) {
                ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
                goto unlock_and_exit;
        }

        xnarch_free_host_mem(bf->bufmem, bf->bufsz);
        removeq(bf->rqueue, &bf->rlink);
        resched = xnsynch_destroy(&bf->isynch_base) == XNSYNCH_RESCHED;
        resched += xnsynch_destroy(&bf->osynch_base) == XNSYNCH_RESCHED;

        if (bf->handle)
                xnregistry_remove(bf->handle);

        xeno_mark_deleted(bf);

        if (resched)
                /*
                 * Some task has been woken up as a result of the
                 * deletion: reschedule now.
                 */
                xnpod_schedule();

unlock_and_exit:

        xnlock_put_irqrestore(&nklock, s);

        return ret;
}
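For context, a minimal lifecycle sketch for the buffer object deleted above, assuming the native skin headers; the object name, 4 KB size and B_FIFO mode are arbitrary illustration values, and demo_buffer_lifecycle() is a hypothetical helper:

#include <native/buffer.h>

static RT_BUFFER demo_bf;               /* hypothetical example object */

static int demo_buffer_lifecycle(void)
{
        int err;

        /* Create a 4 KB byte-stream buffer; its backing storage
           (bf->bufmem) is what rt_buffer_delete() above releases via
           xnarch_free_host_mem(). */
        err = rt_buffer_create(&demo_bf, "demo_bf", 4096, B_FIFO);
        if (err)
                return err;

        /* ... rt_buffer_write()/rt_buffer_read() from real-time tasks ... */

        /* Tear down: wakes any sleepers on the I/O synch objects and
           reschedules if needed, as shown in rt_buffer_delete() above. */
        return rt_buffer_delete(&demo_bf);
}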
static void pse51_lostage_handle_request(void *cookie)
{
        int cpuid = smp_processor_id(), reqnum;
        struct pse51_lostageq_t *rq = &pse51_lostageq[cpuid];

        while ((reqnum = rq->out) != rq->in) {
                struct pse51_lostage_req_t *req = &rq->req[reqnum];

                rq->out = (reqnum + 1) & (PSE51_LO_MAX_REQUESTS - 1);

                switch (req->type) {
#ifdef CONFIG_XENO_OPT_PERVASIVE
                case PSE51_LO_SIGNAL_REQ:
                        pse51_signal_handle_request((pthread_t)req->arg);
                        break;
#endif
                case PSE51_LO_FREE_REQ:
                        xnarch_free_host_mem(req->arg, req->size);
                        break;
                }
        }
}
static void *__wind_shadow_eventcb(int event, void *data)
{
        struct wind_resource_holder *rh;

        switch (event) {
        case XNSHADOW_CLIENT_ATTACH:
                rh = (struct wind_resource_holder *)
                    xnarch_alloc_host_mem(sizeof(*rh));
                if (!rh)
                        return ERR_PTR(-ENOMEM);

                initq(&rh->wdq);
                /* A single server thread pends on this. */
                xnsynch_init(&rh->wdsynch, XNSYNCH_FIFO, NULL);
                initq(&rh->wdpending);
                rh->wdcount = 0;
                initq(&rh->msgQq);
                initq(&rh->semq);

                return &rh->ppd;

        case XNSHADOW_CLIENT_DETACH:
                rh = ppd2rholder((xnshadow_ppd_t *)data);
                wind_wd_flush_rq(&rh->wdq);
                xnsynch_destroy(&rh->wdsynch);
                /* No need to reschedule: all our threads have been zapped. */
                wind_msgq_flush_rq(&rh->msgQq);
                wind_sem_flush_rq(&rh->semq);

                xnarch_free_host_mem(rh, sizeof(*rh));

                return NULL;
        }

        return ERR_PTR(-EINVAL);
}
/**
 * Truncate a file or shared memory object to a specified length.
 *
 * When used in kernel-space, this service sets the size of a shared memory
 * object opened with the shm_open() service to @a len. In user-space, this
 * service falls back to the regular Linux ftruncate() service for file
 * descriptors not obtained with shm_open(). When this service is used to
 * increase the size of a shared memory object, the added space is
 * zero-filled.
 *
 * Shared memory objects are suitable for direct memory access (i.e. allocated
 * in physically contiguous memory) if O_DIRECT was passed to shm_open().
 *
 * Shared memory objects may only be resized if they are not currently mapped.
 *
 * @param fd file descriptor;
 *
 * @param len new length of the underlying file or shared memory object.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EBADF, @a fd is not a valid file descriptor;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the specified length is invalid;
 * - EINVAL, the architecture cannot honour the O_DIRECT flag;
 * - EINTR, this service was interrupted by a signal;
 * - EBUSY, @a fd is a shared memory object descriptor and the underlying
 *   shared memory is currently mapped;
 * - EFBIG, allocation of system memory failed.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/ftruncate.html">
 * Specification.</a>
 *
 */
int ftruncate(int fd, off_t len)
{
        unsigned desc_flags;
        pse51_desc_t *desc;
        pse51_shm_t *shm;
        int err;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);
        shm = pse51_shm_get(&desc, fd, 1);

        if (IS_ERR(shm)) {
                err = -PTR_ERR(shm);
                xnlock_put_irqrestore(&nklock, s);
                goto error;
        }

        if (xnpod_asynch_p() || !xnpod_root_p()) {
                err = EPERM;
                xnlock_put_irqrestore(&nklock, s);
                goto err_shm_put;
        }

        if (len < 0) {
                err = EINVAL;
                xnlock_put_irqrestore(&nklock, s);
                goto err_shm_put;
        }

        desc_flags = pse51_desc_getflags(desc);
        xnlock_put_irqrestore(&nklock, s);

        if (down_interruptible(&shm->maplock)) {
                err = EINTR;
                goto err_shm_put;
        }

        /* Allocate one more page for alignment (the address returned by
           mmap must be aligned on a page boundary). */
        if (len)
#ifdef CONFIG_XENO_OPT_PERVASIVE
                len = xnheap_rounded_size(len + PAGE_SIZE, PAGE_SIZE);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
                len = xnheap_rounded_size(len + PAGE_SIZE, XNHEAP_PAGE_SIZE);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

        err = 0;
        if (emptyq_p(&shm->mappings)) {
                /* Temporary storage, in order to preserve the memory
                   contents upon resizing, if possible. */
                void *addr = NULL;
                size_t size = 0;

                if (shm->addr) {
                        if (len == xnheap_extentsize(&shm->heapbase)) {
                                /* Size unchanged, skip copy and reinit. */
                                err = 0;
                                goto err_up;
                        }

                        size = xnheap_max_contiguous(&shm->heapbase);
                        addr = xnarch_alloc_host_mem(size);
                        if (!addr) {
                                err = ENOMEM;
                                goto err_up;
                        }

                        memcpy(addr, shm->addr, size);
                        xnheap_free(&shm->heapbase, shm->addr);
                        xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);

                        shm->addr = NULL;
                        shm->size = 0;
                }

                if (len) {
                        int flags = XNARCH_SHARED_HEAP_FLAGS |
                                ((desc_flags & O_DIRECT) ? GFP_DMA : 0);

                        err = -xnheap_init_mapped(&shm->heapbase, len, flags);
                        if (err)
                                goto err_up;

                        xnheap_set_label(&shm->heapbase,
                                         "posix shm: %s", shm->nodebase.name);

                        shm->size = xnheap_max_contiguous(&shm->heapbase);
                        shm->addr = xnheap_alloc(&shm->heapbase, shm->size);
                        /* Required. */
                        memset(shm->addr, '\0', shm->size);

                        /* Copy the previous contents. */
                        if (addr)
                                memcpy(shm->addr, addr,
                                       shm->size < size ? shm->size : size);

                        shm->size -= PAGE_SIZE;
                }

                if (addr)
                        xnarch_free_host_mem(addr, size);
        } else if (len != xnheap_extentsize(&shm->heapbase))
                err = EBUSY;

  err_up:
        up(&shm->maplock);

  err_shm_put:
        pse51_shm_put(shm, 1);

        if (!err)
                return 0;

  error:
        thread_set_errno(err == ENOMEM ? EFBIG : err);
        return -1;
}
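The documented behaviour above maps onto the usual POSIX calling sequence. A minimal user-space sketch, assuming the Xenomai POSIX skin wrappers are linked in; the object name "/demo_shm" and the helper demo_shm_map() are illustrative placeholders:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *demo_shm_map(size_t len)   /* hypothetical helper */
{
        void *p = NULL;
        int fd;

        fd = shm_open("/demo_shm", O_CREAT | O_RDWR, 0666);
        if (fd < 0)
                return NULL;

        /* Size the object while it is still unmapped; ftruncate() would
           fail with EBUSY if a mapping already existed. */
        if (ftruncate(fd, len) == 0)
                p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                         fd, 0);

        close(fd);              /* the mapping keeps the object alive */
        return p == MAP_FAILED ? NULL : p;
}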
static void __heap_flush_private(xnheap_t *heap,
                                 void *heapmem, u_long heapsize,
                                 void *cookie)
{
        xnarch_free_host_mem(heapmem, heapsize);
}
static xnshm_a_t *create_new_heap(unsigned long name, int heapsize, int suprt)
{
        xnshm_a_t *p;
        int err;

        p = xnheap_alloc(&kheap, sizeof(xnshm_a_t));
        if (!p)
                return NULL;

        p->heap = xnheap_alloc(&kheap, sizeof(xnheap_t));
        if (!p->heap) {
                xnheap_free(&kheap, p);
                return NULL;
        }

        /*
         * Account for the minimum heap size and overhead so that the
         * actual free space is large enough to match the requested
         * size.
         */
#ifdef CONFIG_XENO_OPT_PERVASIVE
        heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

        err = xnheap_init_mapped(p->heap,
                                 heapsize,
                                 suprt == USE_GFP_KERNEL ? GFP_KERNEL : 0);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
        {
                void *heapmem;

                heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

                heapmem = xnarch_alloc_host_mem(heapsize);

                if (!heapmem) {
                        err = -ENOMEM;
                } else {
                        err = xnheap_init(p->heap, heapmem, heapsize,
                                          XNCORE_PAGE_SIZE);
                        if (err) {
                                xnarch_free_host_mem(heapmem, heapsize);
                        }
                }
        }
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

        if (err) {
                xnheap_free(&kheap, p->heap);
                xnheap_free(&kheap, p);
                return NULL;
        }

        p->chunk = xnheap_mapped_address(p->heap, 0);
        memset(p->chunk, 0, heapsize);

        inith(&p->link);
        p->ref = 1;
        p->name = name;
        p->size = heapsize;

        return p;
}
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
        int err;
        spl_t s;

        if (!xnpod_root_p())
                return -EPERM;

        if (heapsize == 0)
                return -EINVAL;

        /* Make sure we won't hit trivial argument errors when calling
           xnheap_init(). */

        heap->csize = heapsize; /* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
        if (mode & H_MAPPABLE) {
                if (!name || !*name)
                        return -EINVAL;

                heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

                err = xnheap_init_mapped(&heap->heap_base,
                                         heapsize,
                                         ((mode & H_DMA) ? GFP_DMA : 0) |
                                         ((mode & H_DMA32) ? GFP_DMA32 : 0) |
                                         ((mode & H_NONCACHED) ?
                                          XNHEAP_GFP_NONCACHED : 0));
                if (err)
                        return err;

                heap->cpid = 0;
        } else
#endif /* __KERNEL__ */
        {
                void *heapmem;

                heapsize = xnheap_rounded_size(heapsize, XNHEAP_PAGE_SIZE);

                heapmem = xnarch_alloc_host_mem(heapsize);

                if (!heapmem)
                        return -ENOMEM;

                err = xnheap_init(&heap->heap_base, heapmem, heapsize,
                                  XNHEAP_PAGE_SIZE);
                if (err) {
                        xnarch_free_host_mem(heapmem, heapsize);
                        return err;
                }
        }

        xnheap_set_label(&heap->heap_base, "rt_heap: %s", name);

        xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO), NULL);
        heap->handle = 0;       /* i.e. (still) unregistered heap. */
        heap->magic = XENO_HEAP_MAGIC;
        heap->mode = mode;
        heap->sba = NULL;
        xnobject_copy_name(heap->name, name);
        inith(&heap->rlink);
        heap->rqueue = &xeno_get_rholder()->heapq;
        xnlock_get_irqsave(&nklock, s);
        appendq(heap->rqueue, &heap->rlink);
        xnlock_put_irqrestore(&nklock, s);

        /*
         * <!> Since xnregister_enter() may reschedule, only register
         * complete objects, so that the registry cannot return
         * handles to half-baked objects...
         */
        if (name) {
                err = xnregistry_enter(heap->name, heap, &heap->handle,
                                       &__heap_pnode.node);
                if (err)
                        rt_heap_delete(heap);
        }

        return err;
}
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
        int err;
        spl_t s;

        if (!xnpod_root_p())
                return -EPERM;

        if (heapsize == 0)
                return -EINVAL;

        /* Make sure we won't hit trivial argument errors when calling
           xnheap_init(). */

        heap->csize = heapsize; /* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
        if (mode & H_MAPPABLE) {
                if (!name || !*name)
                        return -EINVAL;

#ifdef CONFIG_XENO_OPT_PERVASIVE
                heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

                err = xnheap_init_mapped(&heap->heap_base,
                                         heapsize,
                                         ((mode & H_DMA) ? GFP_DMA : 0) |
                                         ((mode & H_NONCACHED) ?
                                          XNHEAP_GFP_NONCACHED : 0));
                if (err)
                        return err;

                heap->cpid = 0;
#else /* !CONFIG_XENO_OPT_PERVASIVE */
                return -ENOSYS;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
        } else
#endif /* __KERNEL__ */
        {
                void *heapmem;

                heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

                heapmem = xnarch_alloc_host_mem(heapsize);

                if (!heapmem)
                        return -ENOMEM;

                err = xnheap_init(&heap->heap_base, heapmem, heapsize,
                                  XNCORE_PAGE_SIZE);
                if (err) {
                        xnarch_free_host_mem(heapmem, heapsize);
                        return err;
                }
        }

        xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO));
        heap->handle = 0;       /* i.e. (still) unregistered heap. */
        heap->magic = XENO_HEAP_MAGIC;
        heap->mode = mode;
        heap->sba = NULL;
        xnobject_copy_name(heap->name, name);
        inith(&heap->rlink);
        heap->rqueue = &xeno_get_rholder()->heapq;
        xnlock_get_irqsave(&nklock, s);
        appendq(heap->rqueue, &heap->rlink);
        xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
        /*
         * <!> Since xnregister_enter() may reschedule, only register
         * complete objects, so that the registry cannot return
         * handles to half-baked objects...
         */
        if (name) {
                xnpnode_t *pnode = &__heap_pnode;

                if (!*name) {
                        /*
                         * Since this is an anonymous object (empty name
                         * on entry) from user-space, it gets registered
                         * under an unique internal name but is not
                         * exported through /proc.
                         */
                        xnobject_create_name(heap->name, sizeof(heap->name),
                                             (void *)heap);
                        pnode = NULL;
                }

                err = xnregistry_enter(heap->name, heap, &heap->handle, pnode);
                if (err)
                        rt_heap_delete(heap);
        }
#endif /* CONFIG_XENO_OPT_REGISTRY */

        return err;
}
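A minimal usage sketch for the creation path above, assuming the native skin headers; the heap name, 256 KB size, H_MAPPABLE mode and the demo_heap_setup() helper are illustrative, not taken from the sources:

#include <native/heap.h>

static RT_HEAP demo_heap;               /* hypothetical example object */

static int demo_heap_setup(void)
{
        void *block;
        int err;

        /* H_MAPPABLE requires a valid name, since the heap is registered
           so that user-space can map it (see the check in
           rt_heap_create() above). */
        err = rt_heap_create(&demo_heap, "demo_heap", 256 * 1024,
                             H_MAPPABLE);
        if (err)
                return err;

        /* Grab a block; TM_NONBLOCK fails immediately if nothing fits. */
        err = rt_heap_alloc(&demo_heap, 1024, TM_NONBLOCK, &block);
        if (err) {
                rt_heap_delete(&demo_heap);
                return err;
        }

        rt_heap_free(&demo_heap, block);
        return rt_heap_delete(&demo_heap);
}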