Example 1
/*
 * Common code for reading a buffer with various options
 *
 * Read in (if necessary) the block and return a buffer pointer.
 */
struct buf *
bread_common(void *arg, dev_t dev, daddr_t blkno, long bsize)
{
	struct ufsvfs *ufsvfsp = (struct ufsvfs *)arg;
	struct buf *bp;
	klwp_t *lwp = ttolwp(curthread);

	CPU_STATS_ADD_K(sys, lread, 1);
	bp = getblk_common(ufsvfsp, dev, blkno, bsize, /* errflg */ 1);
	if (bp->b_flags & B_DONE)
		return (bp);
	bp->b_flags |= B_READ;
	ASSERT(bp->b_bcount == bsize);
	if (ufsvfsp == NULL) {					/* !ufs */
		(void) bdev_strategy(bp);
	} else if (ufsvfsp->vfs_log && bio_lufs_strategy != NULL) {
							/* ufs && logging */
		(*bio_lufs_strategy)(ufsvfsp->vfs_log, bp);
	} else if (ufsvfsp->vfs_snapshot && bio_snapshot_strategy != NULL) {
							/* ufs && snapshots */
		(*bio_snapshot_strategy)(&ufsvfsp->vfs_snapshot, bp);
	} else {
		ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
		ub.ub_breads.value.ul++;		/* ufs && !logging */
		(void) bdev_strategy(bp);
	}
	if (lwp != NULL)
		lwp->lwp_ru.inblock++;
	CPU_STATS_ADD_K(sys, bread, 1);
	(void) biowait(bp);
	return (bp);
}
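For context, a caller of this path typically grabs the block, checks for an I/O error, uses the data, and releases the buffer with brelse(). The following sketch is illustrative only: read_meta_block() is a hypothetical helper, and it assumes the conventional buffer-cache accessors (B_ERROR, geterror(), b_un.b_addr) behave as in the surrounding source.

/*
 * Illustrative caller-side sketch (not part of the source above):
 * read one block, fail on an I/O error, copy the data out, and
 * release the buffer back to the cache.
 */
static int
read_meta_block(struct ufsvfs *ufsvfsp, dev_t dev, daddr_t blkno,
    long bsize, caddr_t dst)
{
	struct buf *bp;
	int err;

	bp = bread_common(ufsvfsp, dev, blkno, bsize);
	if (bp->b_flags & B_ERROR) {
		err = geterror(bp);	/* b_error, or EIO if unset */
		brelse(bp);
		return (err);
	}
	bcopy(bp->b_un.b_addr, dst, (size_t)bsize);
	brelse(bp);
	return (0);
}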
Example 2
/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
struct buf *
breada(dev_t dev, daddr_t blkno, daddr_t rablkno, long bsize)
{
	struct buf *bp, *rabp;
	klwp_t *lwp = ttolwp(curthread);

	bp = NULL;
	if (!bio_incore(dev, blkno)) {
		CPU_STATS_ADD_K(sys, lread, 1);
		bp = GETBLK(dev, blkno, bsize);
		if ((bp->b_flags & B_DONE) == 0) {
			bp->b_flags |= B_READ;
			bp->b_bcount = bsize;
			(void) bdev_strategy(bp);
			if (lwp != NULL)
				lwp->lwp_ru.inblock++;
			CPU_STATS_ADD_K(sys, bread, 1);
		}
	}
	if (rablkno && bfreelist.b_bcount > 1 &&
	    !bio_incore(dev, rablkno)) {
		rabp = GETBLK(dev, rablkno, bsize);
		if (rabp->b_flags & B_DONE)
			brelse(rabp);
		else {
			rabp->b_flags |= B_READ|B_ASYNC;
			rabp->b_bcount = bsize;
			(void) bdev_strategy(rabp);
			if (lwp != NULL)
				lwp->lwp_ru.inblock++;
			CPU_STATS_ADD_K(sys, bread, 1);
		}
	}
	if (bp == NULL)
		return (BREAD(dev, blkno, bsize));
	(void) biowait(bp);
	return (bp);
}
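Read-ahead only helps when access is sequential, so callers normally pass the block they expect to need next as rablkno. A minimal sketch of that pattern follows; read_sequential() and consume() are hypothetical, and the btodb()-based stride assumes the blocks are laid out contiguously in DEV_BSIZE units.

/*
 * Illustrative sketch: walk nblks contiguous blocks, always asking
 * breada() to prime the cache with the block that follows.  Passing
 * a read-ahead block number of 0 for the last block skips the
 * read-ahead, matching the rablkno check in breada() above.
 */
static int
read_sequential(dev_t dev, daddr_t blkno, daddr_t nblks, long bsize,
    void (*consume)(caddr_t, long))
{
	struct buf *bp;
	daddr_t i;
	int err;

	for (i = 0; i < nblks; i++) {
		daddr_t cur = blkno + i * btodb(bsize);
		daddr_t next = (i + 1 < nblks) ? cur + btodb(bsize) : 0;

		bp = breada(dev, cur, next, bsize);
		if (bp->b_flags & B_ERROR) {
			err = geterror(bp);
			brelse(bp);
			return (err);
		}
		consume(bp->b_un.b_addr, bsize);
		brelse(bp);
	}
	return (0);
}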
Example 3
/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * Also save the time that the block is first marked as delayed
 * so that it will be written in a reasonable time.
 */
void
bdwrite(struct buf *bp)
{
	ASSERT(SEMA_HELD(&bp->b_sem));
	CPU_STATS_ADD_K(sys, lwrite, 1);
	if ((bp->b_flags & B_DELWRI) == 0)
		bp->b_start = ddi_get_lbolt();
	/*
	 * B_DONE allows others to use the buffer, B_DELWRI causes the
	 * buffer to be written before being reused, and setting b_resid
	 * to zero says the buffer is complete.
	 */
	bp->b_flags |= B_DELWRI | B_DONE;
	bp->b_resid = 0;
	brelse(bp);
}
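bdwrite() fits the read-modify-write pattern for partial blocks: pull the block into the cache, patch part of it, and let the delayed write be flushed later (a complete, final block would normally go through bwrite() instead). A minimal sketch, assuming bread() is the plain read wrapper and update_block_fragment() is hypothetical:

/*
 * Illustrative sketch: modify a fragment of a cached block and
 * defer the disk write with bdwrite(), on the expectation that
 * another write to the same block will follow shortly.
 */
static int
update_block_fragment(dev_t dev, daddr_t blkno, long bsize,
    const char *data, size_t off, size_t len)
{
	struct buf *bp;
	int err;

	ASSERT(off + len <= (size_t)bsize);

	bp = bread(dev, blkno, bsize);		/* read-modify-write */
	if (bp->b_flags & B_ERROR) {
		err = geterror(bp);
		brelse(bp);
		return (err);
	}
	bcopy(data, bp->b_un.b_addr + off, len);
	bdwrite(bp);		/* marks B_DELWRI | B_DONE and releases */
	return (0);
}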
Example 4
/*
 * Perform I/O to a given process. This will return EIO if we detect
 * corrupt memory and ENXIO if there is no such mapped address in the
 * user process's address space.
 */
static int
urw(proc_t *p, int writing, void *buf, size_t len, uintptr_t a)
{
	caddr_t addr = (caddr_t)a;
	caddr_t page;
	caddr_t vaddr;
	struct seg *seg;
	int error = 0;
	int err = 0;
	uint_t prot;
	uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
	int protchanged;
	on_trap_data_t otd;
	int retrycnt;
	struct as *as = p->p_as;
	enum seg_rw rw;

	/*
	 * Locate segment containing address of interest.
	 */
	page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
	retrycnt = 0;
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
	if ((seg = as_segat(as, page)) == NULL ||
	    !page_valid(seg, page)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	SEGOP_GETPROT(seg, page, 0, &prot);

	protchanged = 0;
	if ((prot & prot_rw) == 0) {
		protchanged = 1;
		err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | prot_rw);

		if (err == IE_RETRY) {
			protchanged = 0;
			ASSERT(retrycnt == 0);
			retrycnt++;
			goto retry;
		}

		if (err != 0) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (ENXIO);
		}
	}

	/*
	 * segvn may do a copy-on-write in the F_SOFTLOCK/S_READ case to break
	 * sharing, so that a softlocked page cannot later be copy-on-written
	 * by another thread.  Since we hold the address space lock as a
	 * writer, however, no other thread can trigger a copy-on-write here,
	 * so S_READ_NOCOW is passed as the access type to tell segvn that it
	 * is OK to skip the copy-on-write for this SOFTLOCK fault.
	 */
	if (writing)
		rw = S_WRITE;
	else if (seg->s_ops == &segvn_ops)
		rw = S_READ_NOCOW;
	else
		rw = S_READ;

	if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
		if (protchanged)
			(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	CPU_STATS_ADD_K(vm, softlock, 1);

	/*
	 * Make sure we're not trying to read or write off the end of the page.
	 */
	ASSERT(len <= page + PAGESIZE - addr);

	/*
	 * Map in the locked page, copy to our local buffer,
	 * then map the page out and unlock it.
	 */
	vaddr = mapin(as, addr, writing);

	/*
	 * Since we are copying memory on behalf of the user process,
	 * protect against memory error correction faults.
	 */
	if (!on_trap(&otd, OT_DATA_EC)) {
		if (seg->s_ops == &segdev_ops) {
			/*
			 * Device memory can behave strangely; invoke
			 * a segdev-specific copy operation instead.
			 */
			if (writing) {
				if (segdev_copyto(seg, addr, buf, vaddr, len))
					error = ENXIO;
			} else {
				if (segdev_copyfrom(seg, addr, vaddr, buf, len))
					error = ENXIO;
			}
		} else {
			if (writing)
				bcopy(buf, vaddr, len);
			else
				bcopy(vaddr, buf, len);
		}
	} else {
		error = EIO;
	}
	no_trap();

	/*
	 * If we're writing to an executable page, we may need to synchronize
	 * the I$ with the modifications we made through the D$.
	 */
	if (writing && (prot & PROT_EXEC))
		sync_icache(vaddr, (uint_t)len);

	mapout(as, addr, vaddr, writing);

	if (rw == S_READ_NOCOW)
		rw = S_READ;

	(void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);

	if (protchanged)
		(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);

	AS_LOCK_EXIT(as, &as->a_lock);

	return (error);
}
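Note the ASSERT above: urw() handles at most the remainder of the single page containing addr, so a caller that moves an arbitrary-length buffer must split the transfer at page boundaries. A hedged sketch of such a wrapper (urw_all() is hypothetical; the real callers in the source are not shown):

/*
 * Illustrative sketch: copy len bytes to or from the target process
 * by handing urw() at most one page worth of data per call.
 */
static int
urw_all(proc_t *p, int writing, void *buf, size_t len, uintptr_t addr)
{
	caddr_t cp = buf;
	int error = 0;

	while (len != 0 && error == 0) {
		/* bytes remaining in the page that contains addr */
		size_t chunk = PAGESIZE - (addr & PAGEOFFSET);

		if (chunk > len)
			chunk = len;
		error = urw(p, writing, cp, chunk, addr);
		cp += chunk;
		addr += chunk;
		len -= chunk;
	}
	return (error);
}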