/*
 * Free the previously allocated space at ``a'' of ``size'' units back
 * into the specified map.  Sort ``a'' into the map and combine with
 * the entries on one or both ends if possible.
 */
void
atefree(struct map *mp, size_t size, ulong_t a)
{
	register struct map *bp;
	register unsigned long t;	/* holds m_addr/m_size values during insertion */
	register unsigned long s;

	if (size == 0)
		return;
	bp = mapstart(mp);
	s = mutex_spinlock(maplock(mp));
	for ( ; bp->m_addr <= a && bp->m_size != 0; bp++)
		;
	if (bp > mapstart(mp) && (bp-1)->m_addr + (bp-1)->m_size == a) {
		/* coalesce with the entry below the freed range */
		(bp-1)->m_size += size;
		if (bp->m_addr) {	/* m_addr == 0 ends the map table */
			ASSERT(a + size <= bp->m_addr);
			if (a + size == bp->m_addr) {
				/* compress adjacent map addr entries */
				(bp-1)->m_size += bp->m_size;
				while (bp->m_size) {
					bp++;
					(bp-1)->m_addr = bp->m_addr;
					(bp-1)->m_size = bp->m_size;
				}
				mapsize(mp)++;
			}
		}
	} else {
		if (a + size == bp->m_addr && bp->m_size) {
			/* coalesce with the entry above the freed range */
			bp->m_addr -= size;
			bp->m_size += size;
		} else {
			/* insert a new entry, shifting the tail up by one */
			ASSERT(size);
			if (mapsize(mp) == 0) {
				mutex_spinunlock(maplock(mp), s);
				printk("atefree : map overflow 0x%p Lost 0x%lx items at 0x%lx\n",
				    (void *)mp, size, a);
				return;
			}
			do {
				t = bp->m_addr;
				bp->m_addr = a;
				a = t;
				t = bp->m_size;
				bp->m_size = size;
				bp++;
			} while ((size = t));
			mapsize(mp)--;
		}
	}
	mutex_spinunlock(maplock(mp), s);

	/*
	 * wake up everyone waiting for space
	 */
	if (mapout(mp))
		;	/* sv_broadcast(mapout(mp)); */
}
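/*
 * Illustrative user-space sketch (not part of the code above): the same
 * first-fit coalescing algorithm as atefree(), reduced to a stand-alone
 * demo.  ``struct ent'' and demo_free() are hypothetical stand-ins for
 * struct map and friends; the spinlock, the mapsize() free-slot
 * accounting, and the wakeup are omitted.  As in atefree(), entries are
 * kept sorted by m_addr and the table ends at an entry with m_size == 0.
 */
#include <stdio.h>

struct ent { unsigned long m_addr, m_size; };

static void
demo_free(struct ent *map, unsigned long a, unsigned long size)
{
	struct ent *bp = map;

	/* find the first entry above the freed range */
	while (bp->m_size != 0 && bp->m_addr <= a)
		bp++;

	if (bp > map && (bp-1)->m_addr + (bp-1)->m_size == a) {
		(bp-1)->m_size += size;	/* merge with the entry below */
		if (bp->m_size != 0 && a + size == bp->m_addr) {
			/* ... and with the entry above: collapse the table */
			(bp-1)->m_size += bp->m_size;
			do {
				bp[0] = bp[1];
				bp++;
			} while (bp->m_size != 0);
		}
	} else if (bp->m_size != 0 && a + size == bp->m_addr) {
		bp->m_addr -= size;	/* merge with the entry above */
		bp->m_size += size;
	} else {
		/* insert a new entry, shifting the tail up one slot */
		while (size != 0) {
			unsigned long ta = bp->m_addr, ts = bp->m_size;
			bp->m_addr = a;
			bp->m_size = size;
			a = ta;
			size = ts;
			bp++;
		}
	}
}

int
main(void)
{
	/* free list covering [0,10) and [20,50), with room to grow */
	struct ent map[8] = { { 0, 10 }, { 20, 30 }, { 0, 0 } };

	demo_free(map, 10, 10);	/* bridges the gap: one entry [0,50) */
	for (struct ent *e = map; e->m_size != 0; e++)
		printf("[%lu, %lu)\n", e->m_addr, e->m_addr + e->m_size);
	return 0;
}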
/*
 * Perform I/O to a given process. This will return EIO if we detect
 * corrupt memory and ENXIO if there is no such mapped address in the
 * user process's address space.
 */
static int
urw(proc_t *p, int writing, void *buf, size_t len, uintptr_t a)
{
	caddr_t addr = (caddr_t)a;
	caddr_t page;
	caddr_t vaddr;
	struct seg *seg;
	int error = 0;
	int err = 0;
	uint_t prot;
	uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
	int protchanged;
	on_trap_data_t otd;
	int retrycnt;
	struct as *as = p->p_as;
	enum seg_rw rw;

	/*
	 * Locate segment containing address of interest.
	 */
	page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
	retrycnt = 0;
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
	if ((seg = as_segat(as, page)) == NULL ||
	    !page_valid(seg, page)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	SEGOP_GETPROT(seg, page, 0, &prot);

	protchanged = 0;
	if ((prot & prot_rw) == 0) {
		protchanged = 1;
		err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | prot_rw);
		if (err == IE_RETRY) {
			protchanged = 0;
			ASSERT(retrycnt == 0);
			retrycnt++;
			goto retry;
		}
		if (err != 0) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (ENXIO);
		}
	}

	/*
	 * segvn may do a copy-on-write for F_SOFTLOCK/S_READ case to break
	 * sharing to avoid a copy on write of a softlocked page by another
	 * thread. But since we locked the address space as a writer no other
	 * thread can cause a copy on write. S_READ_NOCOW is passed as the
	 * access type to tell segvn that it's ok not to do a copy-on-write
	 * for this SOFTLOCK fault.
	 */
	if (writing)
		rw = S_WRITE;
	else if (seg->s_ops == &segvn_ops)
		rw = S_READ_NOCOW;
	else
		rw = S_READ;

	if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
		if (protchanged)
			(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	CPU_STATS_ADD_K(vm, softlock, 1);

	/*
	 * Make sure we're not trying to read or write off the end of the page.
	 */
	ASSERT(len <= page + PAGESIZE - addr);

	/*
	 * Map in the locked page, copy to our local buffer,
	 * then map the page out and unlock it.
	 */
	vaddr = mapin(as, addr, writing);

	/*
	 * Since we are copying memory on behalf of the user process,
	 * protect against memory error correction faults.
	 */
	if (!on_trap(&otd, OT_DATA_EC)) {
		if (seg->s_ops == &segdev_ops) {
			/*
			 * Device memory can behave strangely; invoke
			 * a segdev-specific copy operation instead.
			 */
			if (writing) {
				if (segdev_copyto(seg, addr, buf, vaddr, len))
					error = ENXIO;
			} else {
				if (segdev_copyfrom(seg, addr, vaddr, buf, len))
					error = ENXIO;
			}
		} else {
			if (writing)
				bcopy(buf, vaddr, len);
			else
				bcopy(vaddr, buf, len);
		}
	} else {
		error = EIO;
	}
	no_trap();

	/*
	 * If we're writing to an executable page, we may need to
	 * synchronize the I$ with the modifications we made through
	 * the D$.
	 */
	if (writing && (prot & PROT_EXEC))
		sync_icache(vaddr, (uint_t)len);

	mapout(as, addr, vaddr, writing);

	if (rw == S_READ_NOCOW)
		rw = S_READ;

	(void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);

	if (protchanged)
		(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);

	AS_LOCK_EXIT(as, &as->a_lock);

	return (error);
}
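/*
 * Hedged sketch (not from the original source): urw() asserts that the
 * request fits within the single page containing ``a'', so a caller
 * moving an arbitrary byte range would split it on page boundaries,
 * as below.  The wrapper name urw_range() is hypothetical; PAGESIZE
 * and PAGEOFFSET are the usual page-size macros from <sys/param.h>.
 * Each per-page call acquires and releases the address-space lock
 * inside urw(), which keeps lock hold times bounded for large copies.
 */
static int
urw_range(proc_t *p, int writing, void *buf, size_t len, uintptr_t a)
{
	caddr_t cp = buf;
	int error = 0;

	while (len != 0 && error == 0) {
		/* bytes remaining in the page that contains ``a'' */
		size_t chunk = PAGESIZE - (a & PAGEOFFSET);

		if (chunk > len)
			chunk = len;
		error = urw(p, writing, cp, chunk, a);
		cp += chunk;
		a += chunk;
		len -= chunk;
	}
	return (error);
}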