static int __sc_rblock(struct task_struct *curr, struct pt_regs *regs)
{
	char __user *buf;
	vrtxpt_t *pt;
	int pid, err;
	spl_t s;

	pid = __xn_reg_arg1(regs);
	buf = (char __user *)__xn_reg_arg2(regs);

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);

	if (!pt || pt->mm != curr->mm) {
		/* Deallocation requests must be issued from the same
		 * process which created the partition. */
		err = ER_ID;
		goto unlock_and_exit;
	}

	/* Convert the caller-based address of buf to the equivalent
	 * area in the kernel address space. */

	if (buf) {
		buf = xnheap_mapped_address(pt->sysheap,
					    (caddr_t)buf - pt->mapbase);
		sc_rblock(pid, buf, &err);
	} else
		err = ER_NMB;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
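/*
 * A minimal sketch of the converse, allocation-side handler, assuming
 * the skin also exposes sc_gblock() (returning a kernel-based block
 * address) and an xnheap_mapped_offset() helper performing the reverse
 * of xnheap_mapped_address(); those names, and the __xn_copy_to_user()
 * call, are assumptions for illustration, not taken from the code
 * above. The allocated block address is rebased on the caller's
 * mapping of the partition heap before being copied back to userland.
 */
static int __sc_gblock(struct task_struct *curr, struct pt_regs *regs)
{
	char __user *buf = NULL;
	vrtxpt_t *pt;
	int pid, err;
	spl_t s;

	pid = __xn_reg_arg1(regs);

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);

	if (!pt || pt->mm != curr->mm) {
		/* Same ownership rule as for deallocation requests. */
		err = ER_ID;
		goto unlock_and_exit;
	}

	buf = sc_gblock(pid, &err);

	if (!err)
		/* Kernel-based block address -> caller-based address. */
		buf = pt->mapbase + xnheap_mapped_offset(pt->sysheap, buf);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	if (!err)
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  &buf, sizeof(buf));

	return err;
}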
static xnshm_a_t *create_new_heap(unsigned long name, int heapsize, int suprt)
{
	xnshm_a_t *p;
	int err;

	p = xnheap_alloc(&kheap, sizeof(xnshm_a_t));
	if (!p)
		return NULL;

	p->heap = xnheap_alloc(&kheap, sizeof(xnheap_t));
	if (!p->heap) {
		xnheap_free(&kheap, p);
		return NULL;
	}

	/*
	 * Account for the minimum heap size and overhead so that the
	 * actual free space is large enough to match the requested
	 * size.
	 */
#ifdef CONFIG_XENO_OPT_PERVASIVE
	heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

	err = xnheap_init_mapped(p->heap,
				 heapsize,
				 suprt == USE_GFP_KERNEL ? GFP_KERNEL : 0);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem) {
			err = -ENOMEM;
		} else {
			err = xnheap_init(p->heap, heapmem, heapsize,
					  XNCORE_PAGE_SIZE);
			if (err)
				xnarch_free_host_mem(heapmem, heapsize);
		}
	}
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

	if (err) {
		xnheap_free(&kheap, p->heap);
		xnheap_free(&kheap, p);
		return NULL;
	}

	p->chunk = xnheap_mapped_address(p->heap, 0);

	memset(p->chunk, 0, heapsize);

	inith(&p->link);
	p->ref = 1;
	p->name = name;
	p->size = heapsize;

	return p;
}
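/*
 * A minimal usage sketch, assuming the descriptors built by
 * create_new_heap() are linked through p->link on a global queue
 * (xnshm_allocq is an assumed name) and shared by their 'name' tag;
 * this helper itself is an assumption for illustration. An existing
 * heap is reused by bumping its reference count rather than creating
 * a second mapping for the same name.
 */
static xnshm_a_t *find_heap_by_name(unsigned long name)
{
	xnholder_t *holder;
	xnshm_a_t *p;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	for (holder = getheadq(&xnshm_allocq); holder;
	     holder = nextq(&xnshm_allocq, holder)) {
		p = container_of(holder, xnshm_a_t, link);
		if (p->name == name) {
			p->ref++;	/* Share the existing heap. */
			xnlock_put_irqrestore(&nklock, s);
			return p;
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	return NULL;	/* Not found; caller falls back to create_new_heap(). */
}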