/*
 * rt_buffer_create - initialize a new buffer object and optionally
 * enter it into the registry.
 *
 * @bf: descriptor to initialize (caller-provided storage).
 * @name: symbolic name; when non-NULL the buffer is entered into the
 *        registry under this name. NULL creates an anonymous buffer.
 * @bufsz: size in bytes of the backing storage; must be non-zero.
 * @mode: creation flags; B_PRIO selects priority-ordered wait queues
 *        for both the input and output synchronization objects.
 *
 * Returns 0 on success, or a negative error code:
 *   -EPERM  if called from an asynchronous (e.g. interrupt) context;
 *   -EINVAL if @bufsz is zero;
 *   -ENOMEM if the backing storage cannot be allocated;
 *   any error from xnregistry_enter() (the buffer is deleted first).
 */
int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
{
	int ret = 0;
	spl_t s;

	/* Creation may block/reschedule; reject asynchronous callers. */
	if (xnpod_asynch_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	bf->bufmem = xnarch_alloc_host_mem(bufsz);
	if (bf->bufmem == NULL)
		return -ENOMEM;

	/* Separate wait queues for readers (input) and writers (output). */
	xnsynch_init(&bf->isynch_base, mode & B_PRIO, NULL);
	xnsynch_init(&bf->osynch_base, mode & B_PRIO, NULL);
	bf->handle = 0;	/* i.e. (still) unregistered buffer. */
	xnobject_copy_name(bf->name, name);
	inith(&bf->rlink);
	bf->rqueue = &xeno_get_rholder()->bufferq;

	/* Link into the resource holder's buffer queue under the nucleus
	   lock, so concurrent walkers see a consistent list. */
	xnlock_get_irqsave(&nklock, s);
	appendq(bf->rqueue, &bf->rlink);
	xnlock_put_irqrestore(&nklock, s);

	/* Ring-buffer state: empty, both offsets at origin, no pending
	   read/write tokens. */
	bf->mode = mode;
	bf->bufsz = bufsz;
	bf->rdoff = 0;
	bf->wroff = 0;
	bf->fillsz = 0;
	bf->rdtoken = 0;
	bf->wrtoken = 0;
#ifndef __XENO_SIM__
	bf->cpid = 0;	/* No creator pid tracked on the simulator build. */
#endif
	bf->magic = XENO_BUFFER_MAGIC;

	/*
	 * <!> Since xnregister_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		ret = xnregistry_enter(bf->name, bf, &bf->handle, &__buffer_pnode.node);
		if (ret)
			/* Registration failed: tear down everything set up
			   above (memory, queues, holder link). */
			rt_buffer_delete(bf);
	}

	return ret;
}
int main(void) { unsigned long long before; RT_ALARM nalrm; RT_BUFFER nbuf; RT_COND ncond; RT_EVENT nevt; RT_HEAP nheap; RT_MUTEX nmtx; RT_PIPE npipe; RT_QUEUE nq; RT_SEM nsem; RT_TASK ntsk; int failed = 0; mlockall(MCL_CURRENT|MCL_FUTURE); rt_print_auto_init(1); rt_fprintf(stderr, "Checking for leaks in native skin services\n"); before = get_used(); check_native(rt_alarm_create(&nalrm, NULL)); check_native(rt_alarm_delete(&nalrm)); check_used("alarm", before, failed); before = get_used(); check_native(rt_buffer_create(&nbuf, NULL, 16384, B_PRIO)); check_native(rt_buffer_delete(&nbuf)); check_used("buffer", before, failed); before = get_used(); check_native(rt_cond_create(&ncond, NULL)); check_native(rt_cond_delete(&ncond)); check_used("cond", before, failed); before = get_used(); check_native(rt_event_create(&nevt, NULL, 0, EV_PRIO)); check_native(rt_event_delete(&nevt)); check_used("event", before, failed); before = get_used(); check_native(rt_heap_create(&nheap, "heap", 16384, H_PRIO | H_SHARED)); check_native(rt_heap_delete(&nheap)); check_used("heap", before, failed); before = get_used(); check_native(rt_mutex_create(&nmtx, NULL)); check_native(rt_mutex_delete(&nmtx)); check_used("mutex", before, failed); before = get_used(); check_native(rt_pipe_create(&npipe, NULL, P_MINOR_AUTO, 0)); check_native(rt_pipe_delete(&npipe)); check_used("pipe", before, failed); before = get_used(); check_native(rt_queue_create(&nq, "queue", 16384, Q_UNLIMITED, Q_PRIO)); check_native(rt_queue_delete(&nq)); check_used("queue", before, failed); before = get_used(); check_native(rt_sem_create(&nsem, NULL, 0, S_PRIO)); check_native(rt_sem_delete(&nsem)); check_used("sem", before, failed); before = get_used(); check_native(rt_task_spawn(&ntsk, NULL, 0, 1, T_JOINABLE, empty, NULL)); check_native(rt_task_join(&ntsk)); sleep(1); /* Leave some time for xnheap * deferred free */ check_used("task", before, failed); return failed ? EXIT_FAILURE : EXIT_SUCCESS; }