/*
 * Module exit hook: tear down the game in strict reverse order of setup.
 * Order matters — tasks must be stopped before the objects they use are
 * destroyed, and the shared heap block must be freed before the heap
 * itself is deleted.
 */
void __exit cleanup_module(void)
{
	/* Stop all real-time tasks first so nothing still runs
	   while objects are being destroyed. */
	invaders_task_cleanup_task();
	fb_task_cleanup_task();
	io_task_cleanup_task();
	hit_task_cleanup_task();
	ship_task_cleanup_task();

	/* Now destroy all per-task objects. */
	invaders_task_cleanup_objects();
	fb_task_cleanup_objects();
	io_task_cleanup_objects();
	hit_task_cleanup_objects();
	ship_task_cleanup_objects();

	/* Release the framebuffer block carved out of the RT heap.
	   Flag is cleared first so a re-entry cannot double-free. */
	if (heap_allocated) {
		heap_allocated = 0;
		rt_heap_free(&heap, &fb_mem_rt);
	}

	/* Delete the RT heap itself (only if it was created). */
	if (heap_created) {
		heap_created = 0;
		rt_heap_delete(&heap);
	}

	/* Drop the interrupt descriptor and leave the Xenomai
	   timestamp/teardown path. */
	rt_intr_delete(&isrDesc);
	xeno_ts_exit();
}
/*
 * Create a native-skin real-time heap.
 *
 * @heap:     descriptor to initialize (caller-owned storage).
 * @name:     symbolic name; required non-empty for H_MAPPABLE heaps,
 *            may be NULL for an unregistered heap.
 * @heapsize: requested size in bytes; must be non-zero. The effective
 *            size is rounded up by xnheap_rounded_size().
 * @mode:     H_MAPPABLE/H_DMA/H_DMA32/H_NONCACHED storage flags plus
 *            H_PRIO or H_FIFO waiter queuing discipline.
 *
 * Returns 0 on success, or a negative error code:
 *  -EPERM if not called from the root (non-RT) context,
 *  -EINVAL on zero size or missing name for a mappable heap,
 *  -ENOMEM when the backing memory cannot be allocated,
 *  or the error from xnheap_init{,_mapped}()/xnregistry_enter().
 */
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	/* Heap creation may allocate/sleep: root context only. */
	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		/* A mappable heap must be registered, hence named. */
		if (!name || !*name)
			return -EINVAL;

		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		/* Translate the H_* storage flags into GFP allocation
		   flags for the kernel-side mapped heap. */
		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_DMA32) ? GFP_DMA32 : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
	} else
#endif /* __KERNEL__ */
	{
		/* Plain (non-mappable) heap backed by host memory. */
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNHEAP_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNHEAP_PAGE_SIZE);
		if (err) {
			/* Roll back the backing allocation on failure. */
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}

	/* NOTE(review): name may be NULL here in the non-mappable case and
	   is handed to a "%s" label format — confirm xnheap_set_label()
	   tolerates a NULL argument. */
	xnheap_set_label(&heap->heap_base, "rt_heap: %s", name);

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO), NULL);
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;

	/* Link the heap into its resource holder's queue under nklock. */
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

	/*
	 * <!> Since xnregister_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(heap->name, heap, &heap->handle,
				       &__heap_pnode.node);
		if (err)
			/* Registration failed: tear the heap down fully. */
			rt_heap_delete(heap);
	}

	/* err is 0 here unless registration failed (both init paths
	   returned early on error). */
	return err;
}
int main(void) { unsigned long long before; RT_ALARM nalrm; RT_BUFFER nbuf; RT_COND ncond; RT_EVENT nevt; RT_HEAP nheap; RT_MUTEX nmtx; RT_PIPE npipe; RT_QUEUE nq; RT_SEM nsem; RT_TASK ntsk; int failed = 0; mlockall(MCL_CURRENT|MCL_FUTURE); rt_print_auto_init(1); rt_fprintf(stderr, "Checking for leaks in native skin services\n"); before = get_used(); check_native(rt_alarm_create(&nalrm, NULL)); check_native(rt_alarm_delete(&nalrm)); check_used("alarm", before, failed); before = get_used(); check_native(rt_buffer_create(&nbuf, NULL, 16384, B_PRIO)); check_native(rt_buffer_delete(&nbuf)); check_used("buffer", before, failed); before = get_used(); check_native(rt_cond_create(&ncond, NULL)); check_native(rt_cond_delete(&ncond)); check_used("cond", before, failed); before = get_used(); check_native(rt_event_create(&nevt, NULL, 0, EV_PRIO)); check_native(rt_event_delete(&nevt)); check_used("event", before, failed); before = get_used(); check_native(rt_heap_create(&nheap, "heap", 16384, H_PRIO | H_SHARED)); check_native(rt_heap_delete(&nheap)); check_used("heap", before, failed); before = get_used(); check_native(rt_mutex_create(&nmtx, NULL)); check_native(rt_mutex_delete(&nmtx)); check_used("mutex", before, failed); before = get_used(); check_native(rt_pipe_create(&npipe, NULL, P_MINOR_AUTO, 0)); check_native(rt_pipe_delete(&npipe)); check_used("pipe", before, failed); before = get_used(); check_native(rt_queue_create(&nq, "queue", 16384, Q_UNLIMITED, Q_PRIO)); check_native(rt_queue_delete(&nq)); check_used("queue", before, failed); before = get_used(); check_native(rt_sem_create(&nsem, NULL, 0, S_PRIO)); check_native(rt_sem_delete(&nsem)); check_used("sem", before, failed); before = get_used(); check_native(rt_task_spawn(&ntsk, NULL, 0, 1, T_JOINABLE, empty, NULL)); check_native(rt_task_join(&ntsk)); sleep(1); /* Leave some time for xnheap * deferred free */ check_used("task", before, failed); return failed ? EXIT_FAILURE : EXIT_SUCCESS; }
/*
 * Create a native-skin real-time heap (legacy variant with
 * CONFIG_XENO_OPT_PERVASIVE / CONFIG_XENO_OPT_REGISTRY gating).
 *
 * @heap:     descriptor to initialize (caller-owned storage).
 * @name:     symbolic name; required non-empty for H_MAPPABLE heaps.
 *            An empty (but non-NULL) name registers the heap under an
 *            auto-generated internal name without a /proc export.
 * @heapsize: requested size in bytes; must be non-zero. The effective
 *            size is rounded up by xnheap_rounded_size().
 * @mode:     H_MAPPABLE/H_DMA/H_NONCACHED storage flags plus H_PRIO or
 *            H_FIFO waiter queuing discipline.
 *
 * Returns 0 on success, or a negative error code:
 *  -EPERM if not called from the root (non-RT) context,
 *  -EINVAL on zero size or missing name for a mappable heap,
 *  -ENOSYS for a mappable heap without CONFIG_XENO_OPT_PERVASIVE,
 *  -ENOMEM when the backing memory cannot be allocated,
 *  or the error from xnheap_init{,_mapped}()/xnregistry_enter().
 */
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	/* Heap creation may allocate/sleep: root context only. */
	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		/* A mappable heap must be registered, hence named. */
		if (!name || !*name)
			return -EINVAL;

#ifdef CONFIG_XENO_OPT_PERVASIVE
		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		/* Translate the H_* storage flags into GFP allocation
		   flags for the kernel-side mapped heap. */
		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		/* User-space mapping support not compiled in. */
		return -ENOSYS;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	} else
#endif /* __KERNEL__ */
	{
		/* Plain (non-mappable) heap backed by host memory. */
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNCORE_PAGE_SIZE);
		if (err) {
			/* Roll back the backing allocation on failure. */
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO));
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;

	/* Link the heap into its resource holder's queue under nklock. */
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregister_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__heap_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on
			   entry) from user-space, it gets registered under
			   a unique internal name but is not exported
			   through /proc (pnode = NULL). */
			xnobject_create_name(heap->name, sizeof(heap->name),
					     (void *)heap);
			pnode = NULL;
		}

		err = xnregistry_enter(heap->name, heap, &heap->handle, pnode);

		if (err)
			/* Registration failed: tear the heap down fully. */
			rt_heap_delete(heap);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	/* err is 0 here unless registration failed (both init paths
	   returned early on error). */
	return err;
}