/*
 * Drop one reference on the shared block named @name and release its
 * backing storage once the count reaches zero.
 *
 * Returns the freed block size on success, 0 when no block of that
 * name reached a zero refcount, or the non-zero status of the heap
 * destruction call on failure (in which case the block stays queued).
 *
 * NOTE(review): xnheap_destroy_mapped() may suspend the caller, yet it
 * is invoked here with nklock held and irqs off — the original inline
 * comment below acknowledges this ("[YES!]"). The lock should be
 * released (after unlinking the block) before destroying the mapped
 * heap; confirm against all calling contexts before changing.
 */
static int _shm_free(unsigned long name)
{
	int ret = 0;
	xnholder_t *holder;
	xnshm_a_t *p;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		/* Note: --p->ref fires for every queued block whose
		   name matches, not only for the one eventually freed. */
		if (p->name == name && --p->ref == 0) {
#ifdef CONFIG_XENO_OPT_REGISTRY
			if (p->handle)
				xnregistry_remove(p->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */
			if (p->heap == &kheap)
				/* Block was carved out of the core heap:
				   a plain free is enough. */
				xnheap_free(&kheap, p->chunk);
			else {
				/* Should release lock here?
				 * Can destroy_mapped suspend ?
				 * [YES!]
				 */
#ifdef CONFIG_XENO_OPT_PERVASIVE
				ret = xnheap_destroy_mapped(p->heap, NULL, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				ret = xnheap_destroy(p->heap,
						     &__heap_flush_private, NULL);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
				if (ret)
					/* Destruction failed: leave the block
					   queued and report the error code. */
					goto unlock_and_exit;

				xnheap_free(&kheap, p->heap);
			}
			removeq(&xnshm_allocq, &p->link);
			ret = p->size;
			xnheap_free(&kheap, p);
			break;
		}

		holder = nextq(&xnshm_allocq, holder);
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
/*
 * Tear down a POSIX shared memory object: unlink it, release its heap
 * and (optionally) drain its mapping records.
 *
 * Must be called nklock locked, irq off. The lock is temporarily
 * released inside (the semaphore operations below may sleep) and
 * re-acquired before returning, so the caller's locking state is
 * preserved across the call.
 */
static void pse51_shm_destroy(pse51_shm_t * shm, int force)
{
	spl_t ignored;

	/* Unlink while still covered by the caller's nklock... */
	removeq(&pse51_shmq, &shm->link);
	/* ...then drop it: down() below may sleep. */
	xnlock_clear_irqon(&nklock);

	down(&shm->maplock);

	if (shm->addr) {
		/* Release the single backing allocation, then the
		   mapped heap itself. */
		xnheap_free(&shm->heapbase, shm->addr);

		xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);

		shm->addr = NULL;
		shm->size = 0;
	}

	if (force) {
		xnholder_t *holder;

		/* Drain any remaining mapping records; maplock is
		   released around each xnfree() call. */
		while ((holder = getq(&shm->mappings))) {
			up(&shm->maplock);
			xnfree(link2map(holder));
			down(&shm->maplock);
		}
	}

	up(&shm->maplock);

	/* Restore the caller's "nklock held, irq off" invariant; the
	   flags captured here are deliberately discarded. */
	xnlock_get_irqsave(&nklock, ignored);
}
/**
 * Release a block to a real-time heap.
 *
 * @param heap Heap descriptor; validated against XENO_HEAP_MAGIC.
 * @param block Block previously obtained from this heap; must be
 *        non-NULL.
 *
 * @return 0 on success, -EINVAL if @a block is NULL, the error from
 * descriptor validation, or the status of the underlying
 * xnheap_free() call. In H_SINGLE mode the call is a no-op returning
 * 0, since the single block is released with the heap itself.
 *
 * After a successful free, as many sleepers as can now be satisfied
 * are allocated their requested block and woken up; the scheduler is
 * invoked once if at least one task was readied.
 */
int rt_heap_free(RT_HEAP *heap, void *block)
{
	int err, nwake;
	spl_t s;

	if (block == NULL)
		return -EINVAL;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		goto unlock_and_exit;
	}

	if (heap->mode & H_SINGLE) {	/* No-op in single-block mode. */
		err = 0;
		goto unlock_and_exit;
	}

	err = xnheap_free(&heap->heap_base, block);

	if (!err && xnsynch_nsleepers(&heap->synch_base) > 0) {
		xnpholder_t *holder, *nholder;

		/* Walk the wait queue, handing freshly available
		   storage to every sleeper whose request now fits. */
		nholder = getheadpq(xnsynch_wait_queue(&heap->synch_base));
		nwake = 0;

		while ((holder = nholder) != NULL) {
			RT_TASK *sleeper =
			    thread2rtask(link2thread(holder, plink));
			/* Renamed from 'block' to avoid shadowing the
			   parameter of the same name (-Wshadow). */
			void *avail;

			avail = xnheap_alloc(&heap->heap_base,
					     sleeper->wait_args.heap.size);
			if (avail) {
				/* Wake this sleeper and advance to the
				   next queue entry atomically. */
				nholder =
				    xnsynch_wakeup_this_sleeper(&heap->
								synch_base,
								holder);
				sleeper->wait_args.heap.block = avail;
				nwake++;
			} else
				nholder =
				    nextpq(xnsynch_wait_queue
					   (&heap->synch_base), holder);
		}

		if (nwake > 0)
			xnpod_schedule();
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Drop one reference on the shared block named @name, freeing it when
 * the count reaches zero.
 *
 * Returns the freed block size on success, 0 when no block of that
 * name reached a zero refcount.
 *
 * Locking: the block is unlinked from xnshm_allocq and deregistered
 * while nklock is held, then the lock is dropped BEFORE the heap is
 * destroyed — xnheap_destroy_mapped() may suspend the caller, so it
 * must not run with nklock held, irqs off. Once unlinked, no other
 * CPU can reach the block, so the lockless teardown is safe.
 */
static int _shm_free(unsigned long name)
{
	xnholder_t *holder;
	xnshm_a_t *p;
	int ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		/* Note: --p->ref fires for every queued block whose
		   name matches, not only for the one eventually freed. */
		if (p->name == name && --p->ref == 0) {
			/* Make the block unreachable, then drop the lock. */
			removeq(&xnshm_allocq, &p->link);
			if (p->handle)
				xnregistry_remove(p->handle);
			xnlock_put_irqrestore(&nklock, s);

			if (p->heap == &kheap)
				/* Core-heap block: plain free. */
				xnheap_free(&kheap, p->chunk);
			else {
#ifdef CONFIG_XENO_OPT_PERVASIVE
				/* The heap descriptor is released by the
				   __heap_flush_shared callback once the
				   unmapping completes. */
				xnheap_destroy_mapped(p->heap,
						      __heap_flush_shared,
						      NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_destroy(p->heap,
					       &__heap_flush_private, NULL);
				xnheap_free(&kheap, p->heap);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
			}

			ret = p->size;
			xnheap_free(&kheap, p);
			return ret;
		}

		holder = nextq(&xnshm_allocq, holder);
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
/*
 * Module cleanup hook for the RTAI shm package.
 *
 * The whole body is compiled out (#if 0): it is a sketch of a garbage
 * collector for blocks still allocated at unload time, disabled until
 * the locking problem noted inside is resolved. The function is
 * currently a no-op.
 */
void __rtai_shm_pkg_cleanup(void)
{
#if 0
	xnholder_t *holder;
	xnshm_a_t *p;
	char szName[6];

	// Garbage collector : to be added : lock problem
	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p) {
			num2nam(p->name, szName);
			printk
			    ("[RTAI -SHM] Cleanup of unfreed memory %s( %d ref.)\n",
			     szName, p->ref);
			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
				/* FIXME: MUST release lock here. */
#ifdef CONFIG_XENO_OPT_PERVASIVE
				xnheap_destroy_mapped(p->heap, NULL, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_destroy(p->heap,
					       &__heap_flush_private, NULL);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_free(&kheap, p->heap);
			}
			removeq(&xnshm_allocq, &p->link);
			xnheap_free(&kheap, p);
			/* NOTE(review): 'holder' belongs to the entry just
			   removed and freed above; the nextq() call below
			   would read freed memory. Capture the next holder
			   BEFORE freeing if this code is ever enabled. */
		}

		holder = nextq(&xnshm_allocq, holder);
	}
#endif
}
static xnshm_a_t *kalloc_new_shm(unsigned long name, int size) { xnshm_a_t *p; p = xnheap_alloc(&kheap, sizeof(xnshm_a_t)); if (!p) return NULL; p->chunk = xnheap_alloc(&kheap, size); if (!p->chunk) { xnheap_free(&kheap, p); return NULL; } memset(p->chunk, 0, size); inith(&p->link); p->ref = 1; p->name = name; p->size = size; p->heap = &kheap; return p; }
/**
 * Truncate a file or shared memory object to a specified length.
 *
 * When used in kernel-space, this service sets the size of a shared
 * memory object opened with the shm_open() service to @a len. In user-space
 * this service falls back to the regular Linux ftruncate service for file
 * descriptors not obtained with shm_open(). When this service is used to
 * increase the size of a shared memory object, the added space is
 * zero-filled.
 *
 * Shared memory objects are suitable for direct memory access (allocated in
 * physically contiguous memory) if O_DIRECT was passed to shm_open.
 *
 * Shared memory objects may only be resized if they are not currently mapped.
 *
 * @param fd file descriptor;
 *
 * @param len new length of the underlying file or shared memory object.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EBADF, @a fd is not a valid file descriptor;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the specified length is invalid;
 * - EINVAL, the architecture can not honour the O_DIRECT flag;
 * - EINTR, this service was interrupted by a signal;
 * - EBUSY, @a fd is a shared memory object descriptor and the underlying
 *   shared memory is currently mapped;
 * - EFBIG, allocation of system memory failed.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/ftruncate.html">
 * Specification.</a>
 *
 */
int ftruncate(int fd, off_t len)
{
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		err = -PTR_ERR(shm);
		xnlock_put_irqrestore(&nklock, s);
		goto error;
	}

	/* Resizing may sleep: forbid it from interrupt or any
	   non-root (primary-mode) context. */
	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (len < 0) {
		err = EINVAL;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	/* Snapshot the descriptor flags before dropping nklock. */
	desc_flags = pse51_desc_getflags(desc);
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	/* Allocate one more page for alignment (the address returned
	   by mmap must be aligned on a page boundary). */
	if (len)
#ifdef CONFIG_XENO_OPT_PERVASIVE
		len = xnheap_rounded_size(len + PAGE_SIZE, PAGE_SIZE);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		len = xnheap_rounded_size(len + PAGE_SIZE, XNHEAP_PAGE_SIZE);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

	err = 0;
	if (emptyq_p(&shm->mappings)) {
		/* Temporary storage, in order to preserve the memory
		   contents upon resizing, if possible. */
		void *addr = NULL;
		size_t size = 0;

		if (shm->addr) {
			if (len == xnheap_extentsize(&shm->heapbase)) {
				/* Size unchanged, skip copy and reinit. */
				err = 0;
				goto err_up;
			}

			/* Save the old contents, then tear the current
			   heap down before rebuilding at the new size. */
			size = xnheap_max_contiguous(&shm->heapbase);
			addr = xnarch_alloc_host_mem(size);
			if (!addr) {
				err = ENOMEM;
				goto err_up;
			}

			memcpy(addr, shm->addr, size);
			xnheap_free(&shm->heapbase, shm->addr);
			xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);

			shm->addr = NULL;
			shm->size = 0;
		}

		if (len) {
			/* O_DIRECT requests physically contiguous DMA-able
			   memory (GFP_DMA). */
			int flags = XNARCH_SHARED_HEAP_FLAGS |
			    ((desc_flags & O_DIRECT) ? GFP_DMA : 0);

			err = -xnheap_init_mapped(&shm->heapbase, len, flags);
			if (err)
				goto err_up;

			xnheap_set_label(&shm->heapbase,
					 "posix shm: %s", shm->nodebase.name);

			/* Grab the whole heap as a single chunk. */
			shm->size = xnheap_max_contiguous(&shm->heapbase);
			shm->addr = xnheap_alloc(&shm->heapbase, shm->size);
			/* Required: new space must read as zeroes. */
			memset(shm->addr, '\0', shm->size);

			/* Copy the previous contents. */
			if (addr)
				memcpy(shm->addr, addr,
				       shm->size < size ? shm->size : size);

			/* Hide the extra alignment page from users. */
			shm->size -= PAGE_SIZE;
		}

		if (addr)
			xnarch_free_host_mem(addr, size);
	} else if (len != xnheap_extentsize(&shm->heapbase))
		/* Mapped objects may not be resized. */
		err = EBUSY;

      err_up:
	up(&shm->maplock);

      err_shm_put:
	pse51_shm_put(shm, 1);

	if (!err)
		return 0;

      error:
	thread_set_errno(err == ENOMEM ? EFBIG : err);
	return -1;
}
/*
 * Allocate a shared-memory descriptor backed by its own dedicated
 * heap of (roughly) @heapsize bytes.
 *
 * With CONFIG_XENO_OPT_PERVASIVE the heap is created mappable to
 * user-space (with GFP_KERNEL allocation when suprt == USE_GFP_KERNEL);
 * otherwise a kernel-only heap is built on host memory.
 *
 * Returns the new descriptor holding a single reference, or NULL on
 * any allocation/initialization failure (partial allocations are
 * released before returning).
 */
static xnshm_a_t *create_new_heap(unsigned long name, int heapsize, int suprt)
{
	xnshm_a_t *p;
	int err;

	p = xnheap_alloc(&kheap, sizeof(xnshm_a_t));
	if (!p)
		return NULL;

	p->heap = xnheap_alloc(&kheap, sizeof(xnheap_t));
	if (!p->heap) {
		xnheap_free(&kheap, p);
		return NULL;
	}

	/*
	 * Account for the minimum heap size and overhead so that the
	 * actual free space is large enough to match the requested
	 * size.
	 */
#ifdef CONFIG_XENO_OPT_PERVASIVE
	heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);
	err = xnheap_init_mapped(p->heap,
				 heapsize,
				 suprt == USE_GFP_KERNEL ? GFP_KERNEL : 0);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem) {
			err = -ENOMEM;
		} else {
			err = xnheap_init(p->heap, heapmem, heapsize,
					  XNCORE_PAGE_SIZE);
			if (err) {
				/* Heap init failed: return the host
				   memory before bailing out. */
				xnarch_free_host_mem(heapmem, heapsize);
			}
		}
	}
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

	if (err) {
		xnheap_free(&kheap, p->heap);
		xnheap_free(&kheap, p);
		return NULL;
	}

	/* NOTE(review): xnheap_mapped_address() is used in both build
	   variants — presumably it degrades to the heap base address
	   for non-mapped heaps; confirm against the xnheap API. */
	p->chunk = xnheap_mapped_address(p->heap, 0);
	memset(p->chunk, 0, heapsize);

	inith(&p->link);
	p->ref = 1;
	p->name = name;
	p->size = heapsize;

	return p;
}
/*
 * Flush callback handed to xnheap_destroy_mapped(): once the shared
 * heap has been torn down, return its descriptor storage to the core
 * heap it was allocated from.
 */
static void __heap_flush_shared(xnheap_t *mapped_heap)
{
	xnheap_free(&kheap, mapped_heap);
}