/*
 * Detach an attached XPMEM address segment.
 *
 * at_vaddr identifies the attachment by the virtual address it was mapped
 * at in the calling thread group's address space.
 *
 * Returns 0 on success (or if another thread is already tearing the
 * attachment down), a negative errno on failure:
 *   -EACCES if the caller's thread group does not own the access permit.
 *
 * Fix: the original code leaked the thread-group reference taken by
 * xpmem_tg_ref_by_gid() on the -EACCES path; it is now deref'd there too.
 */
int xpmem_detach(vaddr_t at_vaddr)
{
	struct xpmem_thread_group *tg;
	struct xpmem_access_permit *ap;
	struct xpmem_attachment *att;

	tg = xpmem_tg_ref_by_gid(current->aspace->id);
	if (IS_ERR(tg))
		return PTR_ERR(tg);

	att = xpmem_att_ref_by_vaddr(tg, at_vaddr);
	if (IS_ERR(att)) {
		xpmem_tg_deref(tg);
		return PTR_ERR(att);
	}

	mutex_lock(&att->mutex);

	/* Another thread is already detaching this attachment; nothing to do. */
	if (att->flags & XPMEM_FLAG_DESTROYING) {
		mutex_unlock(&att->mutex);
		xpmem_att_deref(att);
		xpmem_tg_deref(tg);
		return 0;
	}
	att->flags |= XPMEM_FLAG_DESTROYING;

	ap = att->ap;
	xpmem_ap_ref(ap);

	/* Only the thread group that owns the access permit may detach it. */
	if (current->aspace->id != ap->tg->gid) {
		/* Roll back the DESTROYING mark so a legitimate owner can detach later. */
		att->flags &= ~XPMEM_FLAG_DESTROYING;
		xpmem_ap_deref(ap);
		mutex_unlock(&att->mutex);
		xpmem_att_deref(att);
		xpmem_tg_deref(tg);	/* BUG FIX: this ref was previously leaked */
		return -EACCES;
	}

	__xpmem_detach_att(ap, att);

	mutex_unlock(&att->mutex);

	xpmem_att_destroyable(att);

	xpmem_ap_deref(ap);
	xpmem_att_deref(att);
	xpmem_tg_deref(tg);

	return 0;
}
/*
 * Attach a XPMEM address segment.
 *
 * apid       - access permit identifying the segment; must be > 0.
 * offset     - byte offset into the segment to attach at.
 * size       - number of bytes to attach (rounded up to a page multiple,
 *              then grown by the segment's in-page offset).
 * att_flags  - currently unused by this function (accepted but not read).
 * at_vaddr_p - out: the address in the caller's address space where the
 *              requested portion of the segment is now visible.
 *
 * Returns 0 on success, a negative errno on failure. All references taken
 * here (ap_tg, ap, seg, seg_tg) are dropped before returning; on success
 * the new attachment keeps its own ref (xpmem_att_ref) and lives on the
 * access permit's att_list and the ap_tg hashtable.
 */
int xpmem_attach(xpmem_apid_t apid, off_t offset, size_t size, int att_flags,
		 vaddr_t * at_vaddr_p)
{
	int ret, index;
	vaddr_t seg_vaddr, at_vaddr;
	struct xpmem_thread_group *ap_tg, *seg_tg;
	struct xpmem_access_permit *ap;
	struct xpmem_segment *seg;
	struct xpmem_attachment *att;

	if (apid <= 0)
		return -EINVAL;

	/* If the size is not page aligned, fix it */
	if (offset_in_page(size) != 0)
		size += PAGE_SIZE - offset_in_page(size);

	ap_tg = xpmem_tg_ref_by_apid(apid);
	if (IS_ERR(ap_tg))
		return PTR_ERR(ap_tg);

	ap = xpmem_ap_ref_by_apid(ap_tg, apid);
	if (IS_ERR(ap)) {
		xpmem_tg_deref(ap_tg);
		return PTR_ERR(ap);
	}

	/* Take refs on the segment and its owning thread group for the
	 * duration of the attach; both are dropped on every exit path. */
	seg = ap->seg;
	xpmem_seg_ref(seg);
	seg_tg = seg->tg;
	xpmem_tg_ref(seg_tg);

	/* Serialize against segment teardown; released at out_1. */
	xpmem_seg_down(seg);

	/* Check permissions and resolve the segment-side virtual address. */
	ret = xpmem_validate_access(ap_tg, ap, offset, size, XPMEM_RDWR, &seg_vaddr);
	if (ret != 0)
		goto out_1;

	/* size needs to reflect page offset to start of segment */
	size += offset_in_page(seg_vaddr);

	if (seg->flags & XPMEM_FLAG_SHADOW) {
		BUG_ON(seg->remote_apid <= 0);

		/* remote - load pfns in now */
		ret = xpmem_try_attach_remote(seg->segid, seg->remote_apid,
					      offset, size, &at_vaddr);
		if (ret != 0)
			goto out_1;
	} else {
		/* not remote - simply figure out where we are smartmapped to this process */
		at_vaddr = xpmem_make_smartmap_addr(seg_tg->aspace->id, seg_vaddr);
	}

	/* create new attach structure */
	att = kmem_alloc(sizeof(struct xpmem_attachment));
	if (att == NULL) {
		ret = -ENOMEM;
		goto out_1;
	}

	mutex_init(&att->mutex);
	att->vaddr = seg_vaddr;	/* segment-side address (may be mid-page) */
	att->at_vaddr = at_vaddr;	/* page-aligned attach point */
	att->at_size = size;
	att->ap = ap;
	att->flags = 0;
	INIT_LIST_HEAD(&att->att_node);

	xpmem_att_not_destroyable(att);
	xpmem_att_ref(att);

	/*
	 * The attach point where we mapped the portion of the segment the
	 * user was interested in is page aligned. But the start of the portion
	 * of the segment may not be, so we adjust the address returned to the
	 * user by that page offset difference so that what they see is what
	 * they expected to see.
	 */
	*at_vaddr_p = at_vaddr + offset_in_page(att->vaddr);

	/* link attach structure to its access permit's att list */
	spin_lock(&ap->lock);
	if (ap->flags & XPMEM_FLAG_DESTROYING) {
		/* ap torn down while we were building the att; undo at out_2. */
		spin_unlock(&ap->lock);
		ret = -ENOENT;
		goto out_2;
	}
	list_add_tail(&att->att_node, &ap->att_list);

	/* add att to its ap_tg's hash list */
	index = xpmem_att_hashtable_index(att->at_vaddr);
	write_lock(&ap_tg->att_hashtable[index].lock);
	list_add_tail(&att->att_hashnode, &ap_tg->att_hashtable[index].list);
	write_unlock(&ap_tg->att_hashtable[index].lock);

	spin_unlock(&ap->lock);

	ret = 0;
out_2:
	if (ret != 0) {
		/* Attach failed after the att was created: mark and release it. */
		att->flags |= XPMEM_FLAG_DESTROYING;
		xpmem_att_destroyable(att);
	}
	xpmem_att_deref(att);
out_1:
	/* Drop the segment semaphore and every reference taken above. */
	xpmem_seg_up(seg);
	xpmem_ap_deref(ap);
	xpmem_tg_deref(ap_tg);
	xpmem_seg_deref(seg);
	xpmem_tg_deref(seg_tg);
	return ret;
}
/*
 * Release an access permit and detach all associated attaches.
 *
 * Marks the ap DESTROYING (idempotent: a second caller returns at once),
 * detaches every attachment on its att_list, unhashes the ap from ap_tg,
 * unlinks it from its segment's permit list, and finally drops the
 * segment/thread-group references that were taken in xpmem_get().
 */
void xpmem_release_ap(struct xpmem_thread_group *ap_tg,
		      struct xpmem_access_permit *ap)
{
	int index;
	struct xpmem_thread_group *seg_tg;
	struct xpmem_attachment *att;
	struct xpmem_segment *seg;

	spin_lock(&ap->lock);
	if (ap->flags & XPMEM_FLAG_DESTROYING) {
		/* Someone else is already releasing this ap. */
		spin_unlock(&ap->lock);
		return;
	}
	ap->flags |= XPMEM_FLAG_DESTROYING;

	/* deal with all attaches first */
	while (!list_empty(&ap->att_list)) {
		att = list_entry((&ap->att_list)->next, struct xpmem_attachment,
				 att_node);
		/* Hold a ref across the unlocked detach so att can't vanish;
		 * ap->lock must be dropped because xpmem_detach_att sleeps
		 * (takes att->mutex). Re-acquire before re-testing the list. */
		xpmem_att_ref(att);
		spin_unlock(&ap->lock);
		xpmem_detach_att(ap, att);
		xpmem_att_deref(att);
		spin_lock(&ap->lock);
	}
	spin_unlock(&ap->lock);

	/*
	 * Remove access structure from its hash list.
	 * This is done after the xpmem_detach_att to prevent any racing
	 * thread from looking up access permits for the owning thread group
	 * and not finding anything, assuming everything is clean, and
	 * freeing the mm before xpmem_detach_att has a chance to
	 * use it.
	 */
	index = xpmem_ap_hashtable_index(ap->apid);
	write_lock(&ap_tg->ap_hashtable[index].lock);
	list_del_init(&ap->ap_hashnode);
	write_unlock(&ap_tg->ap_hashtable[index].lock);

	/* the ap's seg and the seg's tg were ref'd in xpmem_get() */
	seg = ap->seg;
	seg_tg = seg->tg;

	/* remove ap from its seg's access permit list */
	spin_lock(&seg->lock);
	list_del_init(&ap->ap_node);
	spin_unlock(&seg->lock);

	/* Try to teardown a shadow segment */
	if (seg->flags & XPMEM_FLAG_SHADOW)
		xpmem_remove_seg(seg_tg, seg);

	xpmem_seg_deref(seg);	/* deref of xpmem_get()'s ref */
	xpmem_tg_deref(seg_tg);	/* deref of xpmem_get()'s ref */

	xpmem_ap_destroyable(ap);
}