Example #1
static int
lqfs_initialize(qfsvfs_t *qfsvfsp, daddr_t bno, int ord, size_t nb,
    struct fiolog *flp)
{
	ml_odunit_t	*ud, *ud2;
	buf_t		*bp;
	timeval_lqfs_common_t tv;
	int error = 0;

	/* LINTED: warning: logical expression always true: op "||" */
	ASSERT(sizeof (ml_odunit_t) < DEV_BSIZE);
	ASSERT(nb >= ldl_minlogsize);

	bp = QFS_GETBLK(qfsvfsp, qfsvfsp->mi.m_fs[ord].dev, bno,
	    dbtob(LS_SECTORS));
	bzero(bp->b_un.b_addr, bp->b_bcount);

	ud = (void *)bp->b_un.b_addr;
	ud->od_version = LQFS_VERSION_LATEST;
	ud->od_maxtransfer = MIN(VFS_IOTRANSZ(qfsvfsp), ldl_maxtransfer);
	if (ud->od_maxtransfer < ldl_mintransfer) {
		ud->od_maxtransfer = ldl_mintransfer;
	}
	ud->od_devbsize = DEV_BSIZE;

	ud->od_requestsize = flp->nbytes_actual;
	ud->od_statesize = dbtob(LS_SECTORS);
	ud->od_logsize = nb - ud->od_statesize;

	ud->od_statebno = INT32_C(0);

	uniqtime(&tv);
	if (tv.tv_usec == last_loghead_ident) {
		tv.tv_usec++;
	}
	last_loghead_ident = tv.tv_usec;
	ud->od_head_ident = tv.tv_usec;
	ud->od_tail_ident = ud->od_head_ident;
	ud->od_chksum = ud->od_head_ident + ud->od_tail_ident;

	ud->od_bol_lof = dbtob(ud->od_statebno) + ud->od_statesize;
	ud->od_eol_lof = ud->od_bol_lof + ud->od_logsize;
	ud->od_head_lof = ud->od_bol_lof;
	ud->od_tail_lof = ud->od_bol_lof;

	ASSERT(lqfs_initialize_debug(ud));

	ml_odunit_validate(ud);

	ud2 = (void *)(bp->b_un.b_addr + DEV_BSIZE);
	bcopy(ud, ud2, sizeof (*ud));

	if ((error = SAM_BWRITE2(qfsvfsp, bp)) != 0) {
		brelse(bp);
		return (error);
	}
	brelse(bp);

	return (0);
}
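All of these examples revolve around dbtob(), which converts a count of disk blocks into bytes (btodb() is the inverse). For orientation, here is a minimal sketch of the conventional BSD-style definitions, assuming the historical 512-byte DEV_BSIZE; the exact spellings vary by system, so treat this as an illustration rather than a portable interface:

/* Illustrative only: the classic <sys/param.h> forms. */
#define DEV_BSHIFT	9			/* log2(DEV_BSIZE) */
#define DEV_BSIZE	(1 << DEV_BSHIFT)	/* 512 bytes per disk block */

#define dbtob(db)	((db) << DEV_BSHIFT)	/* disk blocks -> bytes */
#define btodb(bytes)	((bytes) >> DEV_BSHIFT)	/* bytes -> disk blocks */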
Example #2
static int swapmode(int *retavail, int *retfree)
{
	int n;
	struct swapent *sep;

	*retavail = 0;
	*retfree = 0;

	n = swapctl(SWAP_NSWAP, 0, 0);

	if (n < 1) {
		warn("could not get swap information");
		return 0;
	}

	sep = (struct swapent *) malloc(n * (sizeof(*sep)));

	if (sep == NULL) {
		warn("memory allocation failed");
		return 0;
	}

	if (swapctl(SWAP_STATS, (void *) sep, n) < n) {
		warn("could not get swap stats");
		free(sep);
		return 0;
	}
	for (; n > 0; n--) {
		*retavail += (int) dbtob(sep[n - 1].se_nblks);
		*retfree += (int) dbtob(sep[n - 1].se_nblks - sep[n - 1].se_inuse);
	}
	free(sep);
	*retavail = (int) (*retavail / 1024);
	*retfree = (int) (*retfree / 1024);

	return 1;
}
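A minimal caller sketch (hypothetical) for the swapmode() above; because each se_nblks figure is converted with dbtob() and then divided by 1024, both results are in kilobytes:

	int avail_kb, free_kb;

	if (swapmode(&avail_kb, &free_kb))
		printf("swap: %d KB configured, %d KB free\n", avail_kb, free_kb);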
Example #3
/*
 * Dump the machine-dependent dump header.
 */
u_int
cpu_dump(int (*dump)(dev_t, daddr_t, caddr_t, size_t), daddr_t *blknop)
{
	extern cpu_kcore_hdr_t cpu_kcore_hdr;
	char buf[dbtob(1)];
	cpu_kcore_hdr_t *h;
	kcore_seg_t *kseg;
	int rc;

#ifdef DIAGNOSTIC
	if (cpu_dumpsize() > btodb(sizeof buf)) {
		printf("buffer too small in cpu_dump, ");
		return (EINVAL);	/* "aborted" */
	}
#endif

	bzero(buf, sizeof buf);
	kseg = (kcore_seg_t *)buf;
	h = (cpu_kcore_hdr_t *)(buf + ALIGN(sizeof(kcore_seg_t)));

	/* Create the segment header */
	CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg->c_size = dbtob(1) - ALIGN(sizeof(kcore_seg_t));

	bcopy(&cpu_kcore_hdr, h, sizeof(*h));
	/* We can now fill kptp in the header... */
	h->kcore_kptp = SH3_P1SEG_TO_PHYS((vaddr_t)pmap_kernel()->pm_ptp);

	rc = (*dump)(dumpdev, *blknop, buf, sizeof buf);
	*blknop += btodb(sizeof buf);
	return (rc);
}
Example #4
void
dumpconf(void)
{
	int nblks;

	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	dumpsize = physmem;
	if (dumpsize > atop(dbtob(nblks - dumplo)))
		dumpsize = atop(dbtob(nblks - dumplo));
	else if (dumplo == 0)
		dumplo = nblks - btodb(ptoa(dumpsize));

	/*
	 * Don't dump on the first block in case the dump
	 * device includes a disk label.
	 */
	if (dumplo < btodb(PAGE_SIZE))
		dumplo = btodb(PAGE_SIZE);

	/* Put dump at the end of partition, and make it fit. */
	if (dumpsize + 1 > dtoc(nblks - dumplo))
		dumpsize = dtoc(nblks - dumplo) - 1;
	if (dumplo < nblks - ctod(dumpsize) - 1)
		dumplo = nblks - ctod(dumpsize) - 1;

	/* memory is contiguous on vax */
	cpu_kcore_hdr.ram_segs[0].start = 0;
	cpu_kcore_hdr.ram_segs[0].size = ptoa(physmem);
	cpu_kcore_hdr.sysmap = (vaddr_t)Sysmap;
}
Example #5
void
cpu_dumpconf(void)
{
	int	nblks;

	/*
	 * XXX include the final RAM page which is not included in physmem.
	 */
	if (dumpdev == NODEV)
		return;
	nblks = bdev_size(dumpdev);
	if (nblks > 0) {
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			dumplo = nblks - btodb(ctob(dumpsize));
	}
	/*
	 * Don't dump on the first PAGE_SIZE (why PAGE_SIZE?) in case the dump
	 * device includes a disk label.
	 */
	if (dumplo < btodb(PAGE_SIZE))
		dumplo = btodb(PAGE_SIZE);

	/*
	 * If we have nothing to dump (XXX implement crash dumps),
	 * make it clear for savecore that there is no dump.
	 */
	if (dumpsize <= 0)
		dumplo = 0;
}
Example #6
/*
 * Convert a quotause list to an ASCII file.
 */
int
writeprivs(struct quotause *quplist, int outfd, char *name, int quotatype)
{
	struct quotause *qup;
	FILE *fd;

	ftruncate(outfd, 0);
	lseek(outfd, 0, L_SET);
	if ((fd = fdopen(dup(outfd), "w")) == NULL)
		err(1, "%s", tmpfil);
	fprintf(fd, "Quotas for %s %s:\n", qfextension[quotatype], name);
	for (qup = quplist; qup; qup = qup->next) {
		fprintf(fd, "%s: %s %lu, limits (soft = %lu, hard = %lu)\n",
		    qup->fsname, "kbytes in use:",
		    (unsigned long)(dbtob(qup->dqblk.dqb_curblocks) / 1024),
		    (unsigned long)(dbtob(qup->dqblk.dqb_bsoftlimit) / 1024),
		    (unsigned long)(dbtob(qup->dqblk.dqb_bhardlimit) / 1024));
		fprintf(fd, "%s %lu, limits (soft = %lu, hard = %lu)\n",
		    "\tinodes in use:",
		    (unsigned long)qup->dqblk.dqb_curinodes,
		    (unsigned long)qup->dqblk.dqb_isoftlimit,
		    (unsigned long)qup->dqblk.dqb_ihardlimit);
	}
	fclose(fd);
	return (1);
}
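For illustration, the ASCII file written by writeprivs() looks like this (filesystem name and numbers hypothetical; the kbyte figures are dbtob(blocks) / 1024):

Quotas for user jane:
/home: kbytes in use: 1204, limits (soft = 2048, hard = 4096)
	inodes in use: 310, limits (soft = 0, hard = 0)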
Example #7
/*
 * NAMES:	raid_pw_write
 * DESCRIPTION: issue a synchronous write for a pre-write entry
 * PARAMETERS:	mr_unit_t *un - pointer to the unit structure
 *		int	column	- column number for the pre-write entry
 *		raid_pwhdr_t   *pwhp - needed for some info about the pw header
 *		raid_rplybuf_t *bufp - pointer to the replay buffer structure
 * RETURNS:
 */
static int
raid_pw_write(mr_unit_t *un, int column, raid_pwhdr_t *pwhp,
    raid_rplybuf_t *bufp)
{
	buf_t	 *bp;
	int	 error;

	/* if this column is no longer accessible, return */
	if (!COLUMN_ISUP(un, column))
		return (RAID_RPLY_COMPREPLAY);

	/* set up pointers from raid_rplybuf_t *bufp */
	bp = (buf_t *)bufp->rpl_buf;

	/* calculate the data address or block number */
	bp->b_un.b_addr = bufp->rpl_data + DEV_BSIZE;
	bp->b_bufsize = dbtob(pwhp->rpw_blkcnt);
	bp->b_bcount = dbtob(pwhp->rpw_blkcnt);
	bp->b_flags = (B_WRITE | B_BUSY);
	bp->b_edev  = md_dev64_to_dev(un->un_column[column].un_dev);
	bp->b_lblkno = un->un_column[column].un_devstart + pwhp->rpw_blkno;
	bp->b_iodone = pw_write_done;
	(void) md_call_strategy(bp, 0, NULL);
	if (biowait(bp)) {
		error = raid_replay_error(un, column);
		return (error);
	}
	return (0);
}
Example #8
/*
 * A buffer is written to the snapshotted block device. Copy to
 * backing store if needed.
 */
static int
fss_copy_on_write(void *v, struct buf *bp, bool data_valid)
{
	int error;
	u_int32_t cl, ch, c;
	struct fss_softc *sc = v;

	mutex_enter(&sc->sc_slock);
	if (!FSS_ISVALID(sc)) {
		mutex_exit(&sc->sc_slock);
		return 0;
	}

	cl = FSS_BTOCL(sc, dbtob(bp->b_blkno));
	ch = FSS_BTOCL(sc, dbtob(bp->b_blkno)+bp->b_bcount-1);
	error = 0;
	if (curlwp == uvm.pagedaemon_lwp) {
		for (c = cl; c <= ch; c++)
			if (isclr(sc->sc_copied, c)) {
				error = ENOMEM;
				break;
			}
	}
	mutex_exit(&sc->sc_slock);

	if (error == 0)
		for (c = cl; c <= ch; c++) {
			error = fss_read_cluster(sc, c);
			if (error)
				break;
		}

	return error;
}
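A worked example of the cluster arithmetic, assuming FSS_BTOCL() divides a byte offset by the cluster size and the snapshot uses 64 KiB clusters: an 8 KiB write at b_blkno = 256 starts at byte dbtob(256) = 131072, so cl = 131072 / 65536 = 2 and ch = (131072 + 8192 - 1) / 65536 = 2; only cluster 2 must be copied to the backing store before the write proceeds.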
Example #9
static void
physio_done(struct work *wk, void *dummy)
{
	struct buf *bp = (void *)wk;
	size_t todo = bp->b_bufsize;
	size_t done = bp->b_bcount - bp->b_resid;
	struct physio_stat *ps = bp->b_private;
	bool is_iobuf;

	KASSERT(&bp->b_work == wk);
	KASSERT(bp->b_bcount <= todo);
	KASSERT(bp->b_resid <= bp->b_bcount);
	KASSERT((bp->b_flags & B_PHYS) != 0);
	KASSERT(dummy == NULL);

	vunmapbuf(bp, todo);
	uvm_vsunlock(bp->b_proc->p_vmspace, bp->b_data, todo);

	mutex_enter(&ps->ps_lock);
	is_iobuf = (bp != ps->ps_orig_bp);
	if (__predict_false(done != todo)) {
		off_t endoffset = dbtob(bp->b_blkno) + done;

		/*
		 * we got an error or hit EOM.
		 *
		 * we only care about the first one.
		 * ie. the one at the lowest offset.
		 */

		KASSERT(ps->ps_endoffset != endoffset);
		DPRINTF(("%s: error=%d at %" PRIu64 " - %" PRIu64
		    ", blkno=%" PRIu64 ", bcount=%d, flags=0x%x\n",
		    __func__, bp->b_error, dbtob(bp->b_blkno), endoffset,
		    bp->b_blkno, bp->b_bcount, bp->b_flags));

		if (ps->ps_endoffset == -1 || endoffset < ps->ps_endoffset) {
			DPRINTF(("%s: ps=%p, error %d -> %d, endoff %" PRIu64
			    " -> %" PRIu64 "\n",
			    __func__, ps,
			    ps->ps_error, bp->b_error,
			    ps->ps_endoffset, endoffset));

			ps->ps_endoffset = endoffset;
			ps->ps_error = bp->b_error;
		}
		ps->ps_failed++;
	} else {
		KASSERT(bp->b_error == 0);
	}

	ps->ps_running--;
	cv_signal(&ps->ps_cv);
	mutex_exit(&ps->ps_lock);

	if (is_iobuf)
		putiobuf(bp);
}
Example #10
static void
devread(int fd, void *buf, daddr_t blk, size_t size, char *msg)
{
	if (lseek(fd, dbtob((off_t)blk), SEEK_SET) != dbtob((off_t)blk))
		err(1, "%s: devread: lseek", msg);

	if (read(fd, buf, size) != size)
		err(1, "%s: devread: read", msg);
}
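A hypothetical call site; the device path and block number are illustrative, and the descriptor is assumed to reference a raw device:

	char buf[DEV_BSIZE];
	int fd = open("/dev/rsd0c", O_RDONLY);

	if (fd == -1)
		err(1, "open");
	devread(fd, buf, (daddr_t)0, sizeof buf, "sd0");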
Example #11
/*
 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
 */
int
cpu_dumpsize(void)
{
	int size;

	size = ALIGN(sizeof(kcore_seg_t)) +
	    ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
	if (roundup(size, dbtob(1)) != dbtob(1))
		return (-1);

	return (1);
}
Example #12
/*
 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
 */
int
cpu_dumpsize()
{
	int size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
	    ALIGN(bootconfig.dramblocks * sizeof(phys_ram_seg_t));
	if (roundup(size, dbtob(1)) != dbtob(1))
		return (-1);

	return (1);
}
Example #13
int
devread(int fd, void *buf, daddr_t blk, size_t size, char *msg)
{

    if (lseek(fd, (off_t)dbtob(blk), SEEK_SET) != dbtob(blk)) {
        warn("%s: devread: lseek", msg);
        return 1;
    }
    if (read(fd, buf, size) != size) {
        warn("%s: devread: read", msg);
        return 1;
    }
    return 0;
}
Example #14
/*
 * NAME:	init_pw_area
 *
 * DESCRIPTION: Initialize pre-write area to all zeros.
 *
 * PARAMETERS:	mr_unit_t *un           - pointer to the unit structure
 *		md_dev64_t dev_to_write - device holding the pre-write area
 *		diskaddr_t pwstart      - starting block of the pre-write area
 *		uint_t col              - index of the column being initialized
 *
 * RETURN:	1 if write error on resync device, otherwise 0
 *
 * LOCKS:	Expects Unit Reader Lock to be held across call.
 */
int
init_pw_area(
	mr_unit_t *un,
	md_dev64_t dev_to_write,
	diskaddr_t pwstart,
	uint_t	col
)
{
	buf_t	buf;
	caddr_t	databuffer;
	size_t	copysize;
	size_t	bsize;
	int	error = 0;
	int	i;

	ASSERT(un != NULL);
	ASSERT(un->un_column[col].un_devflags & MD_RAID_DEV_ISOPEN);

	bsize = un->un_iosize;
	copysize = dbtob(bsize);
	databuffer = kmem_zalloc(copysize, KM_SLEEP);
	init_buf(&buf, (B_BUSY | B_WRITE), copysize);

	for (i = 0; i < un->un_pwcnt; i++) {
		/* magic field is 0 for 4.0 compatibility */
		RAID_FILLIN_RPW(databuffer, un, 0, 0,
				0, 0, 0,
				0, col, 0);
		buf.b_un.b_addr = (caddr_t)databuffer;
		buf.b_edev = md_dev64_to_dev(dev_to_write);
		buf.b_bcount = dbtob(bsize);
		buf.b_lblkno = pwstart + (i * un->un_iosize);

		/* write buf */
		(void) md_call_strategy(&buf, MD_STR_NOTTOP, NULL);

		if (biowait(&buf)) {
			error = 1;
			break;
		}
		reset_buf(&buf, (B_BUSY | B_WRITE), copysize);
	} /* for */

	destroy_buf(&buf);
	kmem_free(databuffer, copysize);

	return (error);
}
Example #15
/*
 * Write a superblock to the devfd device from the memory pointed to by fs.
 * Write out the superblock summary information if it is present.
 *
 * If the write is successful, zero is returned. Otherwise one of the
 * following error values is returned:
 *     EIO: failed to write superblock.
 *     EIO: failed to write superblock summary information.
 */
int
ffs_sbput(void *devfd, struct fs *fs, off_t loc,
    int (*writefunc)(void *devfd, off_t loc, void *buf, int size))
{
	int i, error, blks, size;
	uint8_t *space;

	/*
	 * If there is summary information, write it first, so if there
	 * is an error, the superblock will not be marked as clean.
	 */
	if (fs->fs_csp != NULL) {
		blks = howmany(fs->fs_cssize, fs->fs_fsize);
		space = (uint8_t *)fs->fs_csp;
		for (i = 0; i < blks; i += fs->fs_frag) {
			size = fs->fs_bsize;
			if (i + fs->fs_frag > blks)
				size = (blks - i) * fs->fs_fsize;
			if ((error = (*writefunc)(devfd,
			     dbtob(fsbtodb(fs, fs->fs_csaddr + i)),
			     space, size)) != 0)
				return (error);
			space += size;
		}
	}
	fs->fs_fmod = 0;
	fs->fs_time = UFS_TIME;
	if ((error = (*writefunc)(devfd, loc, fs, fs->fs_sbsize)) != 0)
		return (error);
	return (0);
}
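The writefunc callback leaves the actual I/O to the caller. A minimal userland sketch, assuming devfd wraps a plain file descriptor (hypothetical; real callers pass their own context):

static int
write_fd(void *devfd, off_t loc, void *buf, int size)
{
	int fd = *(int *)devfd;

	if (pwrite(fd, buf, size, loc) != size)
		return (EIO);
	return (0);
}

Note that ffs_sbput() hands the callback byte offsets: the summary area is located with dbtob(fsbtodb(fs, ...)), converting filesystem blocks to disk blocks and then to bytes.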
Example #16
void
dumpconf(void)
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	u_int dumpextra, totaldumpsize;		/* in disk blocks */
	u_int seg, nblks;

	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	dumpsize = 0;
	for (seg = 0; seg < h->kcore_nsegs; seg++)
		dumpsize += atop(h->kcore_segs[seg].size);
	dumpextra = cpu_dumpsize();

	/* Always skip the first block, in case there is a label there. */
	if (dumplo < btodb(1))
		dumplo = btodb(1);

	/* Put dump at the end of the partition, and make it fit. */
	totaldumpsize = ctod(dumpsize) + dumpextra;
	if (totaldumpsize > nblks - dumplo) {
		totaldumpsize = dbtob(nblks - dumplo);
		dumpsize = dtoc(totaldumpsize - dumpextra);
	}
	if (dumplo < nblks - totaldumpsize)
		dumplo = nblks - totaldumpsize;
}
Example #17
/*
 * Wrapper to enable Harvey's channel read function to be used like FreeBSD's
 * block read function.
 * Use when reading relative to a vnode.
 */
int32_t
bread(vnode *vn, daddr_t lblkno, size_t size, Buf **buf)
{
	daddr_t pblkno;
	int rcode = ufs_bmaparray(vn, lblkno, &pblkno, nil, nil, nil);
	if (rcode) {
		print("bread failed to transform logical block to physical\n");
		return 1;
	}

	Buf *b = newbuf(size);
	b->vnode = vn;

	MountPoint *mp = vn->mount;
	Chan *c = mp->chan;
	int64_t offset = dbtob(pblkno);

	int32_t bytesRead = c->dev->read(c, b->data, size, offset);

	if (bytesRead != size) {
		releasebuf(b);
		print("bread returned wrong size\n");
		return 1;
	}

	b->resid = size - bytesRead;
	*buf = b;
	return 0;
}
Example #18
/*
 * the actual size of the statefile data isn't known until after all the
 * compressed pages are written; even the inode size doesn't reflect the
 * data size since there are usually many extra fs blocks.  for recording
 * the actual data size, the first sector of the statefile is copied to
 * a tmp buf, and the copy is later updated and flushed to disk.
 */
int
i_cpr_blockzero(char *base, char **bufpp, int *blkno, vnode_t *vp)
{
	extern int cpr_flush_write(vnode_t *);
	static char cpr_sector[DEV_BSIZE];
	cpr_ext bytes, *dst;

	/*
	 * this routine is called after cdd_t and csu_md_t are copied
	 * to cpr_buf; mini-hack alert: the save/update method creates
	 * a dependency on the combined struct size being >= one sector
	 * or DEV_BSIZE; since introduction in Sol2.7, csu_md_t size is
	 * over 1K bytes and will probably grow with any changes.
	 *
	 * copy when vp is NULL, flush when non-NULL
	 */
	if (vp == NULL) {
		ASSERT((*bufpp - base) >= DEV_BSIZE);
		bcopy(base, cpr_sector, sizeof (cpr_sector));
		return (0);
	} else {
		bytes = dbtob(*blkno);
		dst = &((cdd_t *)cpr_sector)->cdd_filesize;
		bcopy(&bytes, dst, sizeof (bytes));
		bcopy(cpr_sector, base, sizeof (cpr_sector));
		*bufpp = base + sizeof (cpr_sector);
		*blkno = cpr_statefile_offset();
		CPR_DEBUG(CPR_DEBUG1, "statefile data size: %ld\n\n", bytes);
		return (cpr_flush_write(vp));
	}
}
Example #19
/*
 * This function increments the inode version number
 *
 * This may be used one day by the NFS server
 */
static void
inc_inode_version(struct inode *inode, struct ext2_group_desc *gdp, int mode)
{
	unsigned long inode_block;
	struct buf *bh;
	struct ext2_inode *raw_inode;

	inode_block = gdp->bg_inode_table + (((inode->i_number - 1) %
			EXT2_INODES_PER_GROUP(inode->i_sb)) /
			EXT2_INODES_PER_BLOCK(inode->i_sb));
	bh = bread (inode->i_sb->s_dev, dbtob(inode_block), inode->i_sb->s_blocksize);
	if (!bh) {
		kprintf ("inc_inode_version Cannot load inode table block - "
			    "inode=%lu, inode_block=%lu\n",
			    inode->i_number, inode_block);
		inode->u.ext2_i.i_version = 1;
		return;
	}
	raw_inode = ((struct ext2_inode *) bh->b_data) +
			(((inode->i_number - 1) %
			EXT2_INODES_PER_GROUP(inode->i_sb)) %
			EXT2_INODES_PER_BLOCK(inode->i_sb));
	raw_inode->i_version++;
	inode->u.ext2_i.i_version = raw_inode->i_version;
	bdwrite (bh);
}
Example #20
/*
 * Initiate IO on given buffer.
 */
int
xfs_buf_iorequest(struct xfs_buf *bp)
{
	bp->b_flags &= ~(B_INVAL|B_DONE);
	bp->b_ioflags &= ~BIO_ERROR;

	if (bp->b_flags & B_ASYNC)
		BUF_KERNPROC(bp);

	if (bp->b_vp == NULL) {
		if (bp->b_iocmd == BIO_WRITE) {
			bp->b_flags &= ~(B_DELWRI | B_DEFERRED);
			bufobj_wref(bp->b_bufobj);
		}

		bp->b_iooffset = (bp->b_blkno << BBSHIFT);
		bstrategy(bp);
	} else {
		if (bp->b_iocmd == BIO_WRITE) {
			/* Mark the buffer clean */
			bundirty(bp);
			bufobj_wref(bp->b_bufobj);
			vfs_busy_pages(bp, 1);
		} else if (bp->b_iocmd == BIO_READ) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
	}
	return 0;
}
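Aside: the raw-device branch above computes bp->b_iooffset = (bp->b_blkno << BBSHIFT) by hand; since the XFS basic-block shift BBSHIFT is 9, matching DEV_BSHIFT, this is the same arithmetic dbtob() performs in the vnode branch.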
Example #21
/*
 * This is an estimation of the number of TP_BSIZE blocks in the file.
 * It estimates the number of blocks in files with holes by assuming
 * that all of the blocks accounted for by di_blocks are data blocks
 * (when some of the blocks are usually used for indirect pointers);
 * hence the estimate may be high.
 */
int64_t
blockest(union dinode *dp)
{
	int64_t blkest, sizeest;

	/*
	 * dp->di_size is the size of the file in bytes.
	 * dp->di_blocks stores the number of sectors actually in the file.
	 * If there are more sectors than the size would indicate, this just
	 *	means that there are indirect blocks in the file or unused
	 *	sectors in the last file block; we can safely ignore these
	 *	(blkest = sizeest below).
	 * If the file is bigger than the number of sectors would indicate,
	 *	then the file has holes in it.	In this case we must use the
	 *	block count to estimate the number of data blocks used, but
	 *	we use the actual size for estimating the number of indirect
	 *	dump blocks (sizeest vs. blkest in the indirect block
	 *	calculation).
	 */
	blkest = howmany(dbtob((int64_t)DIP(dp, di_blocks)), TP_BSIZE);
	sizeest = howmany((int64_t)DIP(dp, di_size), TP_BSIZE);
	if (blkest > sizeest)
		blkest = sizeest;
	if (DIP(dp, di_size) > sblock->fs_bsize * NDADDR) {
		/* calculate the number of indirect blocks on the dump tape */
		blkest +=
			howmany(sizeest - NDADDR * sblock->fs_bsize / TP_BSIZE,
			TP_NINDIR);
	}
	return (blkest + 1);
}
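A worked example with assumed values: take TP_BSIZE = 1024, fs_bsize = 16384, NDADDR = 12, and a sparse file with di_size = 64 KiB but only di_blocks = 16 sectors allocated:

	blkest  = howmany(dbtob(16), 1024) = howmany(8192, 1024) = 8
	sizeest = howmany(65536, 1024)     = 64

Here blkest < sizeest, so the allocated-block figure stands; 64 KiB also fits within NDADDR * fs_bsize = 192 KiB, so no indirect-block term is added, and blockest() returns 8 + 1 = 9 tape blocks.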
Example #22
/*
 * Calculate the logical to physical mapping if not done already,
 * then call the device strategy routine.
 *
 * In order to be able to swap to a file, the ext2_bmaparray() operation may not
 * deadlock on memory.  See ext2_bmap() for details.
 */
static int
ext2_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp = ap->a_bp;
	struct vnode *vp = ap->a_vp;
	struct inode *ip;
	struct bufobj *bo;
	int32_t blkno;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		panic("ext2_strategy: spec");
	if (bp->b_blkno == bp->b_lblkno) {
		error = ext2_bmaparray(vp, bp->b_lblkno, &blkno, NULL, NULL);
		bp->b_blkno = blkno;
		if (error) {
			bp->b_error = error;
			bp->b_ioflags |= BIO_ERROR;
			bufdone(bp);
			return (0);
		}
		if ((long)bp->b_blkno == -1)
			vfs_bio_clrbuf(bp);
	}
	if ((long)bp->b_blkno == -1) {
		bufdone(bp);
		return (0);
	}
	bp->b_iooffset = dbtob(bp->b_blkno);
	bo = VFSTOEXT2(vp->v_mount)->um_bo;
	BO_STRATEGY(bo, bp);
	return (0);
}
Example #23
static int
reiserfs_getattr(struct vop_getattr_args *ap)
{
    struct vnode *vp         = ap->a_vp;
    struct vattr *vap        = ap->a_vap;
    struct reiserfs_node *ip = VTOI(vp);

    vap->va_fsid      = dev2udev(ip->i_dev);
    vap->va_fileid    = ip->i_number;
    vap->va_mode      = ip->i_mode & ~S_IFMT;
    vap->va_nlink     = ip->i_nlink;
    vap->va_uid       = ip->i_uid;
    vap->va_gid       = ip->i_gid;
    //XXX vap->va_rdev      = ip->i_rdev;
    vap->va_size      = ip->i_size;
    vap->va_atime     = ip->i_atime;
    vap->va_mtime     = ip->i_mtime;
    vap->va_ctime     = ip->i_ctime;
    vap->va_flags     = ip->i_flags;
    vap->va_gen       = ip->i_generation;
    vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
    vap->va_bytes     = dbtob((u_quad_t)ip->i_blocks);
    vap->va_type      = vp->v_type;
    //XXX vap->va_filerev   = ip->i_modrev;

    return (0);
}
Example #24
/*
 * cpu_dump: dump the machine-dependent kernel core dump headers.
 */
int
cpu_dump()
{
	int (*dump)(dev_t, daddr_t, void *, size_t);
	char bf[dbtob(1)];
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;
	phys_ram_seg_t *memsegp;
	const struct bdevsw *bdev;
	int i;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		return (ENXIO);
	dump = bdev->d_dump;

	memset(bf, 0, sizeof bf);
	segp = (kcore_seg_t *)bf;
	cpuhdrp = (cpu_kcore_hdr_t *)&bf[ALIGN(sizeof(*segp))];
	memsegp = (phys_ram_seg_t *)&bf[ALIGN(sizeof(*segp)) +
	    ALIGN(sizeof(*cpuhdrp))];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info.
	 */
	cpuhdrp->version = 1;
	cpuhdrp->PAKernelL1Table = pmap_kernel_L1_addr();
	cpuhdrp->UserL1TableSize = 0;
	cpuhdrp->nmemsegs = bootconfig.dramblocks;
	cpuhdrp->omemsegs = ALIGN(sizeof(*cpuhdrp));

	/*
	 * Fill in the memory segment descriptors.
	 */
	for (i = 0; i < bootconfig.dramblocks; i++) {
		memsegp[i].start = bootconfig.dram[i].address;
		memsegp[i].size = bootconfig.dram[i].pages * PAGE_SIZE;
	}

	return (dump(dumpdev, dumplo, bf, dbtob(1)));
}
Example #25
/*
 * Return the size of the machine-dependent dump header, in disk blocks.
 */
u_int
cpu_dumpsize()
{
	u_int size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	return (btodb(roundup(size, dbtob(1))));
}
Example #26
void
RAW_READ(void *buf, daddr_t blkpos, size_t bytelen)
{

	if (pread(fd, buf, bytelen, (off_t)dbtob(blkpos)) != (ssize_t) bytelen)
		err(1, "pread: buf %p, blk %d, len %u",
		    buf, (int) blkpos, bytelen);
}
Example #27
static int
ld_ataraid_start_span(struct ld_softc *ld, struct buf *bp)
{
	struct ld_ataraid_softc *sc = (void *) ld;
	struct ataraid_array_info *aai = sc->sc_aai;
	struct ataraid_disk_info *adi;
	struct cbuf *cbp;
	char *addr;
	daddr_t bn;
	long bcount, rcount;
	u_int comp;

	/* Allocate component buffers. */
	addr = bp->b_data;

	/* Find the first component. */
	comp = 0;
	adi = &aai->aai_disks[comp];
	bn = bp->b_rawblkno;
	while (bn >= adi->adi_compsize) {
		bn -= adi->adi_compsize;
		adi = &aai->aai_disks[++comp];
	}

	bp->b_resid = bp->b_bcount;

	for (bcount = bp->b_bcount; bcount > 0; bcount -= rcount) {
		rcount = bp->b_bcount;
		if ((adi->adi_compsize - bn) < btodb(rcount))
			rcount = dbtob(adi->adi_compsize - bn);

		cbp = ld_ataraid_make_cbuf(sc, bp, comp, bn, addr, rcount);
		if (cbp == NULL) {
			/* Free the already allocated component buffers. */
			while ((cbp = SIMPLEQ_FIRST(&sc->sc_cbufq)) != NULL) {
				SIMPLEQ_REMOVE_HEAD(&sc->sc_cbufq, cb_q);
				CBUF_PUT(cbp);
			}
			return EAGAIN;
		}

		/*
		 * For a span, we always know we advance to the next disk,
		 * and always start at offset 0 on that disk.
		 */
		adi = &aai->aai_disks[++comp];
		bn = 0;

		SIMPLEQ_INSERT_TAIL(&sc->sc_cbufq, cbp, cb_q);
		addr += rcount;
	}

	/* Now fire off the requests. */
	softint_schedule(sc->sc_sih_cookie);

	return 0;
}
Example #28
static int
getfreespace(struct vol *vol, VolSpace *bfree, VolSpace *btotal,
    uid_t uid, const char *classq)
{
	int retq;
	struct ufs_quota_entry ufsq[QUOTA_NLIMITS];
	time_t now;

	if (time(&now) == -1) {
		LOG(log_info, logtype_afpd, "time(): %s",
		    strerror(errno));
		return -1;
	}

	if ( seteuid( getuid() ) != 0 )  {
		LOG(log_info, logtype_afpd, "seteuid(): %s",
		    strerror(errno));
		return -1;
	}
	if ((retq = getfsquota(vol->v_path, ufsq, uid, classq)) < 0) {
		LOG(log_info, logtype_afpd, "getfsquota(%s, %s): %s",
		    vol->v_path, classq, strerror(errno));
	}
	seteuid( uid );
	if (retq < 1)
		return retq;

	switch(QL_STATUS(quota_check_limit(ufsq[QUOTA_LIMIT_BLOCK].ufsqe_cur, 1,
	    ufsq[QUOTA_LIMIT_BLOCK].ufsqe_softlimit,
	    ufsq[QUOTA_LIMIT_BLOCK].ufsqe_hardlimit,
	    ufsq[QUOTA_LIMIT_BLOCK].ufsqe_time, now))) {
	case QL_S_DENY_HARD:
	case QL_S_DENY_GRACE:
		*bfree = 0;
		*btotal = dbtob(ufsq[QUOTA_LIMIT_BLOCK].ufsqe_cur);
		break;
	default:
		*bfree = dbtob(ufsq[QUOTA_LIMIT_BLOCK].ufsqe_hardlimit -
		    ufsq[QUOTA_LIMIT_BLOCK].ufsqe_cur);
		*btotal = dbtob(ufsq[QUOTA_LIMIT_BLOCK].ufsqe_hardlimit);
		break;
	}
	return 1;
}
Example #29
/*
 * lookup device size in blocks,
 * and return available space in bytes
 */
size_t
cpr_get_devsize(dev_t dev)
{
	size_t bytes = 0;
	int64_t Nblocks;
	int nblocks;

	if ((Nblocks = bdev_Size(dev)) != -1)
		bytes = dbtob(Nblocks);
	else if ((nblocks = bdev_size(dev)) != -1)
		bytes = dbtob(nblocks);

	if (bytes > CPR_SPEC_OFFSET)
		bytes -= CPR_SPEC_OFFSET;
	else
		bytes = 0;

	return (bytes);
}
Example #30
File: ld.c, Project: MarginC/kame
/*
 * Take a dump.
 */
int
lddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	struct ld_softc *sc;
	struct disklabel *lp;
	int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv;
	static int dumping;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	if (sc->sc_dump == NULL)
		return (ENXIO);

	/* Check if recursive dump; if so, punt. */
	if (dumping)
		return (EFAULT);
	dumping = 1;

	/* Convert to disk sectors.  Size must be a multiple of d_secsize. */
	part = DISKPART(dev);
	lp = sc->sc_dk.dk_label;
	if ((size % lp->d_secsize) != 0) {
		dumping = 0;
		return (EFAULT);
	}
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize;	/* blkno in DEV_BSIZE units */

	nsects = lp->d_partitions[part].p_size;
	sectoff = lp->d_partitions[part].p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + towrt) > nsects)) {
		dumping = 0;
		return (EINVAL);
	}

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Start dumping and return when done. */
	maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1;
	while (towrt > 0) {
		nblk = min(maxblkcnt, towrt);

		if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0) {
			dumping = 0;
			return (rv);
		}

		towrt -= nblk;
		blkno += nblk;
		va += nblk * sc->sc_secsize;
	}

	dumping = 0;
	return (0);
}