/*
 * Send a signal to the segment associated with an access permit.
 */
int
xpmem_signal(xpmem_apid_t apid)
{
        struct xpmem_thread_group *ap_tg, *seg_tg;
        struct xpmem_access_permit *ap;
        struct xpmem_segment *seg;
        int ret;

        if (apid <= 0)
                return -EINVAL;

        ap_tg = xpmem_tg_ref_by_apid(apid);
        if (IS_ERR(ap_tg))
                return PTR_ERR(ap_tg);

        ap = xpmem_ap_ref_by_apid(ap_tg, apid);
        if (IS_ERR(ap)) {
                xpmem_tg_deref(ap_tg);
                return PTR_ERR(ap);
        }

        seg = ap->seg;
        xpmem_seg_ref(seg);
        seg_tg = seg->tg;
        xpmem_tg_ref(seg_tg);

        xpmem_seg_down(seg);

        if (!(seg->flags & XPMEM_FLAG_SIGNALLABLE)) {
                ret = -EACCES;
                goto out;
        }

        /* Send the signal */
        if (seg->flags & XPMEM_FLAG_SHADOW) {
                /* Shadow segment - deliver to the remote domain */
                ret = xpmem_irq_deliver(seg->segid,
                                        *((xpmem_sigid_t *)&(seg->sig)),
                                        seg->domid);
        } else {
                /* Local segment */
                xpmem_seg_signal(seg);
                ret = 0;
        }

out:
        xpmem_seg_up(seg);
        xpmem_seg_deref(seg);
        xpmem_tg_deref(seg_tg);
        xpmem_ap_deref(ap);
        xpmem_tg_deref(ap_tg);

        return ret;
}
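/*
 * Both xpmem_signal() above and xpmem_attach() below open with the same
 * lookup/pin sequence: pin the apid's thread group, pin the access permit,
 * then pin the backing segment and its owning thread group before taking
 * the segment semaphore.  The helper below is a hypothetical sketch, not
 * part of the driver; it is shown only to make that sequence, and its
 * pairing with the deref calls in the exit paths, explicit.
 */
#if 0
static int
xpmem_pin_ap_and_seg(xpmem_apid_t apid,
                     struct xpmem_thread_group **ap_tg_p,
                     struct xpmem_access_permit **ap_p,
                     struct xpmem_thread_group **seg_tg_p,
                     struct xpmem_segment **seg_p)
{
        struct xpmem_thread_group *ap_tg;
        struct xpmem_access_permit *ap;
        struct xpmem_segment *seg;

        if (apid <= 0)
                return -EINVAL;

        /* Pin the thread group owning the access permit */
        ap_tg = xpmem_tg_ref_by_apid(apid);
        if (IS_ERR(ap_tg))
                return PTR_ERR(ap_tg);

        /* Pin the access permit itself */
        ap = xpmem_ap_ref_by_apid(ap_tg, apid);
        if (IS_ERR(ap)) {
                xpmem_tg_deref(ap_tg);
                return PTR_ERR(ap);
        }

        /* Pin the backing segment and its owner, then take the seg sema */
        seg = ap->seg;
        xpmem_seg_ref(seg);
        xpmem_tg_ref(seg->tg);
        xpmem_seg_down(seg);

        *ap_tg_p  = ap_tg;
        *ap_p     = ap;
        *seg_tg_p = seg->tg;
        *seg_p    = seg;
        return 0;
}
#endif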
/*
 * Attach an XPMEM address segment.
 */
int
xpmem_attach(xpmem_apid_t apid, off_t offset, size_t size, int att_flags,
             vaddr_t *at_vaddr_p)
{
        int ret, index;
        vaddr_t seg_vaddr, at_vaddr;
        struct xpmem_thread_group *ap_tg, *seg_tg;
        struct xpmem_access_permit *ap;
        struct xpmem_segment *seg;
        struct xpmem_attachment *att;

        if (apid <= 0)
                return -EINVAL;

        /* If the size is not page aligned, round it up */
        if (offset_in_page(size) != 0)
                size += PAGE_SIZE - offset_in_page(size);

        ap_tg = xpmem_tg_ref_by_apid(apid);
        if (IS_ERR(ap_tg))
                return PTR_ERR(ap_tg);

        ap = xpmem_ap_ref_by_apid(ap_tg, apid);
        if (IS_ERR(ap)) {
                xpmem_tg_deref(ap_tg);
                return PTR_ERR(ap);
        }

        seg = ap->seg;
        xpmem_seg_ref(seg);
        seg_tg = seg->tg;
        xpmem_tg_ref(seg_tg);

        xpmem_seg_down(seg);

        ret = xpmem_validate_access(ap_tg, ap, offset, size, XPMEM_RDWR,
                                    &seg_vaddr);
        if (ret != 0)
                goto out_1;

        /* size needs to reflect the page offset to the start of the segment */
        size += offset_in_page(seg_vaddr);

        if (seg->flags & XPMEM_FLAG_SHADOW) {
                BUG_ON(seg->remote_apid <= 0);

                /* remote - load the pfns in now */
                ret = xpmem_try_attach_remote(seg->segid, seg->remote_apid,
                                              offset, size, &at_vaddr);
                if (ret != 0)
                        goto out_1;
        } else {
                /* not remote - simply figure out where we are smartmapped
                 * into this process */
                at_vaddr = xpmem_make_smartmap_addr(seg_tg->aspace->id,
                                                    seg_vaddr);
        }

        /* create the new attachment structure */
        att = kmem_alloc(sizeof(struct xpmem_attachment));
        if (att == NULL) {
                ret = -ENOMEM;
                goto out_1;
        }

        mutex_init(&att->mutex);
        att->vaddr    = seg_vaddr;
        att->at_vaddr = at_vaddr;
        att->at_size  = size;
        att->ap       = ap;
        att->flags    = 0;
        INIT_LIST_HEAD(&att->att_node);

        xpmem_att_not_destroyable(att);
        xpmem_att_ref(att);

        /*
         * The attach point where we mapped the portion of the segment the
         * user was interested in is page aligned, but the start of that
         * portion of the segment may not be.  Adjust the address returned
         * to the user by that page offset difference so that what they see
         * is what they expected to see.
         */
        *at_vaddr_p = at_vaddr + offset_in_page(att->vaddr);

        /* link the attachment structure to its access permit's att list */
        spin_lock(&ap->lock);
        if (ap->flags & XPMEM_FLAG_DESTROYING) {
                spin_unlock(&ap->lock);
                ret = -ENOENT;
                goto out_2;
        }
        list_add_tail(&att->att_node, &ap->att_list);

        /* add att to its ap_tg's hash list */
        index = xpmem_att_hashtable_index(att->at_vaddr);
        write_lock(&ap_tg->att_hashtable[index].lock);
        list_add_tail(&att->att_hashnode, &ap_tg->att_hashtable[index].list);
        write_unlock(&ap_tg->att_hashtable[index].lock);

        spin_unlock(&ap->lock);

        ret = 0;
out_2:
        if (ret != 0) {
                att->flags |= XPMEM_FLAG_DESTROYING;
                xpmem_att_destroyable(att);
        }
        xpmem_att_deref(att);
out_1:
        xpmem_seg_up(seg);
        xpmem_ap_deref(ap);
        xpmem_tg_deref(ap_tg);
        xpmem_seg_deref(seg);
        xpmem_tg_deref(seg_tg);

        return ret;
}
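/*
 * Consumer-side sketch (illustrative only, kept out of the build): how the
 * attach path above is typically reached through the standard libxpmem user
 * API (xpmem_get/xpmem_attach/xpmem_release/xpmem_detach).  It assumes the
 * exporting process has already published `segid` via xpmem_make(); whether
 * this particular port exposes exactly these wrappers is an assumption, the
 * map_peer_segment() helper name is made up for this sketch, and error
 * handling is kept minimal.
 */
#if 0
#include <xpmem.h>
#include <stddef.h>

static void *
map_peer_segment(xpmem_segid_t segid, size_t size, xpmem_apid_t *apid_p)
{
        struct xpmem_addr addr;
        xpmem_apid_t apid;
        void *vaddr;

        /* Obtain an access permit (apid) for the exported segment */
        apid = xpmem_get(segid, XPMEM_RDWR, XPMEM_PERMIT_MODE, (void *)0600);
        if (apid < 0)
                return NULL;

        addr.apid   = apid;
        addr.offset = 0;        /* byte offset into the segment */

        /* This call lands in the kernel-side xpmem_attach() above */
        vaddr = xpmem_attach(addr, size, NULL);
        if (vaddr == (void *)-1) {
                xpmem_release(apid);
                return NULL;
        }

        *apid_p = apid;
        return vaddr;           /* unmap later with xpmem_detach(vaddr) */
}
#endif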