/*
 * Wait until a set of event flags is signaled in an event group, or
 * peek at / poll the group depending on @mask and @timeout.
 *
 * @event: event group descriptor (validated against XENO_EVENT_MAGIC).
 * @mask: set of bits to wait for; zero means "just read the current value".
 * @mask_r: receives the matched bits (or the raw value for a zero mask).
 * @mode: EV_ANY waits for any bit of @mask, otherwise all bits are required.
 * @timeout_mode/@timeout: passed through to xnsynch_sleep_on();
 *   TM_NONBLOCK turns the call into a non-blocking poll.
 *
 * Returns 0 on success, or a negative error code (-EWOULDBLOCK, -EPERM,
 * -EIDRM, -ETIMEDOUT, -EINTR, or a handle-validation error).
 */
int rt_event_wait_inner(RT_EVENT *event, unsigned long mask,
			unsigned long *mask_r, int mode,
			xntmode_t timeout_mode, RTIME timeout)
{
	unsigned long matched;
	xnflags_t sleep_info;
	RT_TASK *waiter;
	int ret = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);
	if (event == NULL) {
		ret = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	if (mask == 0) {
		/* Zero mask means: report the current value, no waiting. */
		*mask_r = event->value;
		goto unlock_and_exit;
	}

	/* Safe to compute once: event->value cannot change while we
	   hold the nucleus lock. */
	matched = event->value & mask;

	if (timeout == TM_NONBLOCK) {
		/* Non-blocking poll: report what matched, and flag
		   -EWOULDBLOCK when the request is not satisfied. */
		*mask_r = matched;
		if (mode & EV_ANY) {
			if (matched == 0)
				ret = -EWOULDBLOCK;
		} else if (matched != mask)
			ret = -EWOULDBLOCK;
		goto unlock_and_exit;
	}

	/* Already satisfied? EV_ANY needs at least one bit of the mask,
	   otherwise every requested bit must be present. */
	if ((mode & EV_ANY) ? matched != 0 : matched == mask) {
		*mask_r = matched;
		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		/* Current context is not allowed to sleep. */
		ret = -EPERM;
		goto unlock_and_exit;
	}

	/* Record what we are waiting for, so the signaling side can
	   decide when to wake us and which bits we consumed. */
	waiter = xeno_current_task();
	waiter->wait_args.event.mode = mode;
	waiter->wait_args.event.mask = mask;

	sleep_info = xnsynch_sleep_on(&event->synch_base, timeout, timeout_mode);

	if (sleep_info & XNRMID)
		ret = -EIDRM;	/* Event group deleted while pending. */
	else if (sleep_info & XNTIMEO)
		ret = -ETIMEDOUT;	/* Timeout. */
	else if (sleep_info & XNBREAK)
		ret = -EINTR;	/* Unblocked. */

	/*
	 * The returned mask is only significant if the operation has
	 * succeeded, but do always write it back anyway.
	 */
	*mask_r = waiter->wait_args.event.mask;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
/*
 * Allocate a block from a real-time heap, optionally sleeping until
 * memory becomes available.
 *
 * @heap: heap descriptor (validated against XENO_HEAP_MAGIC).
 * @size: requested block size; in single-block mode, zero or the exact
 *   original heap size.
 * @timeout: relative wait limit; TM_NONBLOCK fails immediately with
 *   -EWOULDBLOCK when no memory is available.
 * @blockp: receives the allocated block address, or NULL on failure
 *   (always written back before returning).
 *
 * Returns 0 on success, or a negative error code (-EINVAL, -ENOMEM,
 * -EWOULDBLOCK, -EPERM, -EIDRM, -ETIMEDOUT, -EINTR, or a
 * handle-validation error).
 */
int rt_heap_alloc(RT_HEAP *heap, size_t size, RTIME timeout, void **blockp)
{
	void *chunk = NULL;
	RT_TASK *waiter;
	int ret = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);
	if (heap == NULL) {
		ret = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		goto unlock_and_exit;
	}

	if (heap->mode & H_SINGLE) {
		/*
		 * Single-block mode: one allocation hands out the whole
		 * addressable heap space, and every user of this heap is
		 * given the very same block.
		 */
		chunk = heap->sba;
		if (chunk == NULL) {
			/*
			 * A zero size is fine here since the requested
			 * size is implicitly the whole heap space; a
			 * non-zero size must match the original heap
			 * size exactly.
			 */
			if (size > 0 && size != heap->csize) {
				ret = -EINVAL;
				goto unlock_and_exit;
			}
			chunk = xnheap_alloc(&heap->heap_base,
					     xnheap_max_contiguous
					     (&heap->heap_base));
			heap->sba = chunk;
		}
		if (chunk == NULL)
			ret = -ENOMEM;	/* Should never happen. Paranoid. */
		goto unlock_and_exit;
	}

	/* Regular mode: try an immediate allocation first. */
	chunk = xnheap_alloc(&heap->heap_base, size);
	if (chunk != NULL)
		goto unlock_and_exit;

	if (timeout == TM_NONBLOCK) {
		ret = -EWOULDBLOCK;
		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		/* Current context is not allowed to sleep. */
		ret = -EPERM;
		goto unlock_and_exit;
	}

	/* Record the request so the releasing side can satisfy it and
	   pass the resulting block back through wait_args. */
	waiter = xeno_current_task();
	waiter->wait_args.heap.size = size;
	waiter->wait_args.heap.block = NULL;

	xnsynch_sleep_on(&heap->synch_base, timeout, XN_RELATIVE);

	if (xnthread_test_info(&waiter->thread_base, XNRMID))
		ret = -EIDRM;	/* Heap deleted while pending. */
	else if (xnthread_test_info(&waiter->thread_base, XNTIMEO))
		ret = -ETIMEDOUT;	/* Timeout. */
	else if (xnthread_test_info(&waiter->thread_base, XNBREAK))
		ret = -EINTR;	/* Unblocked. */
	else
		chunk = waiter->wait_args.heap.block;

unlock_and_exit:

	*blockp = chunk;

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}