Example 1
0
/*
 * Persist a symlink target for znode zp.  A short target is embedded in
 * the dnode's bonus buffer immediately after the legacy znode_phys area;
 * a target too large for the bonus buffer is written into the znode's
 * first data block instead.  All on-disk changes are made under tx.
 */
void
zfs_sa_symlink(znode_t *zp, char *link, int len, dmu_tx_t *tx)
{
	dmu_buf_t *bonus_db = sa_get_db(zp->z_sa_hdl);

	if (ZFS_OLD_ZNODE_PHYS_SIZE + len > dmu_bonus_max()) {
		/* Too big to inline: store the target in a data block. */
		dmu_buf_t *data_db;

		zfs_grow_blocksize(zp, len, tx);
		VERIFY(dmu_buf_hold(ZTOZSB(zp)->z_os, zp->z_id, 0,
		    FTAG, &data_db, DMU_READ_NO_PREFETCH) == 0);

		dmu_buf_will_dirty(data_db, tx);

		ASSERT3U(len, <=, data_db->db_size);
		abd_copy_from_buf(data_db->db_data, link, len);
		dmu_buf_rele(data_db, FTAG);
	} else {
		/*
		 * Fits inline: extend the bonus buffer to cover the target
		 * and copy it in just past the legacy znode_phys region.
		 */
		VERIFY(dmu_set_bonus(bonus_db,
		    len + ZFS_OLD_ZNODE_PHYS_SIZE, tx) == 0);
		if (len != 0) {
			abd_copy_from_buf_off(bonus_db->db_data, link, len,
			    ZFS_OLD_ZNODE_PHYS_SIZE);
		}
	}
}
Example 2
0
File: mmp.c  Project: LLNL/zfs
/*
 * Choose a random vdev, label, and MMP block, and write over it
 * with a copy of the last-synced uberblock, whose timestamp
 * has been updated to reflect that the pool is in use.
 */
static void
mmp_write_uberblock(spa_t *spa)
{
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	mmp_thread_t *mmp = &spa->spa_mmp;
	uberblock_t *ub;
	vdev_t *vd;
	int label;
	uint64_t offset;

	/*
	 * NOTE(review): SCL_STATE is only dropped here on the early-return
	 * path below.  On the success path it is still held when this
	 * function returns — presumably the write's done callback
	 * (mmp_write_done) releases it; confirm against that callback.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vd = mmp_random_leaf(spa->spa_root_vdev);
	if (vd == NULL) {
		/* No suitable leaf vdev: nothing to write to. */
		spa_config_exit(spa, SCL_STATE, FTAG);
		return;
	}

	mutex_enter(&mmp->mmp_io_lock);

	/* Lazily create the long-lived root zio all MMP writes hang off. */
	if (mmp->mmp_zio_root == NULL)
		mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
		    flags | ZIO_FLAG_GODFATHER);

	/*
	 * Stamp the cached uberblock with the current wall-clock time and
	 * MMP fields while holding mmp_io_lock, so the copy below is
	 * consistent.
	 */
	ub = &mmp->mmp_ub;
	ub->ub_timestamp = gethrestime_sec();
	ub->ub_mmp_magic = MMP_MAGIC;
	ub->ub_mmp_delay = mmp->mmp_delay;
	vd->vdev_mmp_pending = gethrtime();

	/* Snapshot the uberblock into a zero-padded, uberblock-sized abd. */
	zio_t *zio  = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
	abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));

	/* Drop the lock before issuing I/O; the abd holds a private copy. */
	mutex_exit(&mmp->mmp_io_lock);

	/*
	 * Pick a random slot from the last MMP_BLOCKS_PER_LABEL uberblock
	 * slots in a randomly chosen label.
	 */
	offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
	    MMP_BLOCKS_PER_LABEL + spa_get_random(MMP_BLOCKS_PER_LABEL));

	label = spa_get_random(VDEV_LABELS);
	vdev_label_write(zio, vd, label, ub_abd, offset,
	    VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
	    flags | ZIO_FLAG_DONT_PROPAGATE);

	/* Record this write in the MMP history kstat before issuing it. */
	spa_mmp_history_add(ub->ub_txg, ub->ub_timestamp, ub->ub_mmp_delay, vd,
	    label);

	zio_nowait(zio);
}
Example 3
0
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
    dmu_buf_impl_t *db;
    int txgoff = tx->tx_txg & TXG_MASK;
    int nblkptr = dn->dn_phys->dn_nblkptr;
    int old_toplvl = dn->dn_phys->dn_nlevels - 1;
    int new_level = dn->dn_next_nlevels[txgoff];
    int i;

    rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

    /* this dnode can't be paged out because it's dirty */
    ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
    ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
    ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

    db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
    ASSERT(db != NULL);

    dn->dn_phys->dn_nlevels = new_level;
    dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
            dn->dn_object, dn->dn_phys->dn_nlevels);

    /* check for existing blkptrs in the dnode */
    for (i = 0; i < nblkptr; i++)
        if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
            break;
    if (i != nblkptr) {
        /* transfer dnode's block pointers to new indirect block */
        (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
        ASSERT(db->db.db_data);
        ASSERT(arc_released(db->db_buf));
        ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
        abd_copy_from_buf(db->db.db_data, dn->dn_phys->dn_blkptr,
                          sizeof (blkptr_t) * nblkptr);
        arc_buf_freeze(db->db_buf);
    }