/*
 * Release one reference on a shared memory pool descriptor and tear it
 * down once the last reference is gone.  Must be called from secondary
 * (Linux) mode, since vfree() is a regular kernel service.
 */
void cobalt_umm_destroy(struct cobalt_umm *umm)
{
	secondary_mode_only();

	/* Drop our reference; bail out unless it was the last one. */
	if (!atomic_dec_and_test(&umm->refcount))
		return;

	xnheap_destroy(&umm->heap);
	/* The backing storage was vmalloc'ed; release it now that the
	   heap bookkeeping is gone. */
	vfree(xnheap_get_membase(&umm->heap));

	/* Optional owner callback, e.g. to free the descriptor itself. */
	if (umm->release)
		umm->release(umm);
}
static int _shm_free(unsigned long name) { int ret = 0; xnholder_t *holder; xnshm_a_t *p; spl_t s; xnlock_get_irqsave(&nklock, s); holder = getheadq(&xnshm_allocq); while (holder != NULL) { p = link2shma(holder); if (p->name == name && --p->ref == 0) { #ifdef CONFIG_XENO_OPT_REGISTRY if (p->handle) xnregistry_remove(p->handle); #endif /* CONFIG_XENO_OPT_REGISTRY */ if (p->heap == &kheap) xnheap_free(&kheap, p->chunk); else { /* Should release lock here? * Can destroy_mapped suspend ? * [YES!] */ #ifdef CONFIG_XENO_OPT_PERVASIVE ret = xnheap_destroy_mapped(p->heap, NULL, NULL); #else /* !CONFIG_XENO_OPT_PERVASIVE */ ret = xnheap_destroy(p->heap, &__heap_flush_private, NULL); #endif /* !CONFIG_XENO_OPT_PERVASIVE */ if (ret) goto unlock_and_exit; xnheap_free(&kheap, p->heap); } removeq(&xnshm_allocq, &p->link); ret = p->size; xnheap_free(&kheap, p); break; } holder = nextq(&xnshm_allocq, holder); } unlock_and_exit: xnlock_put_irqrestore(&nklock, s); return ret; }
/*
 * Delete a real-time heap descriptor and release its storage.
 *
 * @heap: heap descriptor to destroy (validated against XENO_HEAP_MAGIC).
 * @mapaddr: user-space mapping address handed to the unmap path when the
 * heap is H_MAPPABLE (pervasive builds only).
 *
 * Returns 0 on success, -EPERM when not called from the Linux root
 * context, or the error propagated from validation/teardown.
 */
int rt_heap_delete_inner(RT_HEAP *heap, void __user *mapaddr)
{
	int err = 0;
	spl_t s;

	/* Heap teardown invokes Linux services; only the root (secondary
	   mode) context may do this. */
	if (!xnpod_root_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		/* Stale or foreign handle: translate it into the proper
		   error code and bail out. */
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	xeno_mark_deleted(heap);

	/* Get out of the nklocked section before releasing the heap
	   memory, since we are about to invoke Linux kernel
	   services. */

	xnlock_put_irqrestore(&nklock, s);

	/*
	 * The heap descriptor has been marked as deleted before we
	 * released the superlock thus preventing any successful
	 * subsequent calls of rt_heap_delete(), so now we can
	 * actually destroy it safely.
	 */

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (heap->mode & H_MAPPABLE)
		err = xnheap_destroy_mapped(&heap->heap_base,
					    __heap_post_release, mapaddr);
	else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
		err = xnheap_destroy(&heap->heap_base,
				     &__heap_flush_private, NULL);

	xnlock_get_irqsave(&nklock, s);

	if (err)
		/* Teardown failed: resurrect the descriptor so later
		   delete attempts can retry. */
		heap->magic = XENO_HEAP_MAGIC;
	else if (!(heap->mode & H_MAPPABLE))
		/* NOTE(review): __heap_post_release() runs here while
		   nklock is held — presumably it performs no Linux
		   calls on this path; later revisions invoke it outside
		   the lock. TODO confirm. */
		__heap_post_release(&heap->heap_base);

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int _shm_free(unsigned long name) { xnholder_t *holder; xnshm_a_t *p; int ret; spl_t s; xnlock_get_irqsave(&nklock, s); holder = getheadq(&xnshm_allocq); while (holder != NULL) { p = link2shma(holder); if (p->name == name && --p->ref == 0) { removeq(&xnshm_allocq, &p->link); if (p->handle) xnregistry_remove(p->handle); xnlock_put_irqrestore(&nklock, s); if (p->heap == &kheap) xnheap_free(&kheap, p->chunk); else { #ifdef CONFIG_XENO_OPT_PERVASIVE xnheap_destroy_mapped(p->heap, __heap_flush_shared, NULL); #else /* !CONFIG_XENO_OPT_PERVASIVE */ xnheap_destroy(p->heap, &__heap_flush_private, NULL); xnheap_free(&kheap, p->heap); #endif /* !CONFIG_XENO_OPT_PERVASIVE */ } ret = p->size; xnheap_free(&kheap, p); return ret; } holder = nextq(&xnshm_allocq, holder); } xnlock_put_irqrestore(&nklock, s); return 0; }
/*
 * Delete a real-time heap descriptor and release its storage.
 *
 * @heap: heap descriptor to destroy (validated against XENO_HEAP_MAGIC).
 * @mapaddr: user-space mapping address forwarded to the unmap path for
 * H_MAPPABLE heaps (non-simulator builds only).
 *
 * Returns 0 on success, -EPERM when not invoked from the Linux root
 * context, or the error code derived from a stale/invalid handle.
 */
int rt_heap_delete_inner(RT_HEAP *heap, void __user *mapaddr)
{
	spl_t s;
	int ret;

	/* Tearing the heap down requires Linux services, so only the
	   root (secondary mode) context may proceed. */
	if (!xnpod_root_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (heap == NULL) {
		/* Bad or stale handle: map it to the proper errno. */
		ret = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		xnlock_put_irqrestore(&nklock, s);
		return ret;
	}

	xeno_mark_deleted(heap);

	/*
	 * Leave the nklocked section before releasing the heap memory:
	 * we are about to invoke Linux kernel services.  Since the
	 * descriptor was marked deleted while the superlock was still
	 * held, no subsequent rt_heap_delete() can succeed on it, so
	 * the teardown below is race-free.
	 */
	xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
	if (heap->mode & H_MAPPABLE)
		/* The post-release hook runs once all mappings are gone. */
		xnheap_destroy_mapped(&heap->heap_base,
				      __heap_post_release, mapaddr);
	else
#endif
	{
		xnheap_destroy(&heap->heap_base,
			       &__heap_flush_private, NULL);
		__heap_post_release(&heap->heap_base);
	}

	return 0;
}
/*
 * Module-exit hook for the RTAI shm package.
 *
 * The garbage-collection pass below — which would reclaim any shared
 * memory blocks user code leaked — is compiled out (#if 0) because of
 * an unresolved locking problem: it walks xnshm_allocq without holding
 * nklock, and the mapped-heap teardown cannot run under that lock
 * anyway (see the FIXME inside).  Until that is sorted out, cleanup is
 * intentionally a no-op.
 */
void __rtai_shm_pkg_cleanup(void)
{
#if 0
	xnholder_t *holder;
	xnshm_a_t *p;
	char szName[6];

	/* Garbage collector: to be added. Disabled due to the lock
	   problem described above. */
	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p) {
			num2nam(p->name, szName);
			printk
			    ("[RTAI -SHM] Cleanup of unfreed memory %s( %d ref.)\n",
			     szName, p->ref);
			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
				/* FIXME: MUST release lock here. */
#ifdef CONFIG_XENO_OPT_PERVASIVE
				xnheap_destroy_mapped(p->heap, NULL, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_destroy(p->heap,
					       &__heap_flush_private, NULL);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_free(&kheap, p->heap);
			}
			removeq(&xnshm_allocq, &p->link);
			xnheap_free(&kheap, p);
		}

		holder = nextq(&xnshm_allocq, holder);
	}
#endif
}