static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfs_sb_t *zsb = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZFS_ENTER(zsb);
	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZFS_EXIT(zsb);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance. Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	if (sync_mode != wbc->sync_mode) {
		ZFS_ENTER(zsb);
		ZFS_VERIFY_ZP(zp);
		if (zsb->z_log != NULL)
			zil_commit(zsb->z_log, zp->z_id);
		ZFS_EXIT(zsb);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details). That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	}
	return (result);
}
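/*
 * For context: write_cache_pages() calls the supplied callback once per
 * dirty page, handing `mapping' back through the opaque data pointer.
 * The sketch below is illustrative only -- the name zpl_putpage_sketch is
 * made up.  The real zpl_putpage() follows this shape but additionally
 * annotates the current task so that memory allocations in this path
 * cannot re-enter the filesystem and deadlock.
 */
static int
zpl_putpage_sketch(struct page *pp, struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));

	/*
	 * Hand the page to the common ZFS putpage path.  With
	 * wbc->sync_mode == WB_SYNC_NONE (the first pass above) the data
	 * is logged to the ZIL without waiting for it to reach stable
	 * storage; the single zil_commit() in zpl_writepages() then makes
	 * everything durable in one go.
	 */
	(void) zfs_putpage(mapping->host, pp, wbc);

	return (0);
}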
/*
 * Lookup/Create an extended attribute entry.
 *
 * Input arguments:
 *	dzp	- znode for hidden attribute directory
 *	name	- name of attribute
 *	flag	- ZNEW: if the entry already exists, fail with EEXIST
 *		  (mapped to ENOATTR on return).
 *		  ZEXISTS: if the entry does not exist, fail with ENOENT.
 *
 * Output arguments:
 *	vpp	- pointer to the vnode for the entry (NULL if there isn't one)
 *
 * Return value: 0 on success or errno value on failure.
 */
int
zfs_obtain_xattr(znode_t *dzp, const char *name, mode_t mode, cred_t *cr,
    vnode_t **vpp, int flag)
{
	znode_t *xzp = NULL;
	zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
	zilog_t *zilog;
	zfs_dirlock_t *dl;
	dmu_tx_t *tx;
	struct vnode_attr vattr;
	int error;
	struct componentname cn;
	zfs_acl_ids_t acl_ids;

	/* zfs_dirent_lock() expects a component name. */
	bzero(&cn, sizeof (cn));
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = ISLASTCN;
	cn.cn_nameptr = (char *)name;
	cn.cn_namelen = strlen(name);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	VATTR_INIT(&vattr);
	VATTR_SET(&vattr, va_type, VREG);
	VATTR_SET(&vattr, va_mode, mode & ~S_IFMT);

	if ((error = zfs_acl_ids_create(dzp, 0, &vattr, cr, NULL,
	    &acl_ids)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
top:
	/* Lock the attribute entry name. */
	if ((error = zfs_dirent_lock(&dl, dzp, (char *)name, &xzp, flag,
	    NULL, &cn)) != 0) {
		zfs_acl_ids_free(&acl_ids);	/* not consumed; avoid leak */
		goto out;
	}

	/* If the name already exists, we're done. */
	if (xzp != NULL) {
		zfs_dirent_unlock(dl);
		zfs_acl_ids_free(&acl_ids);	/* not consumed; avoid leak */
		goto out;
	}

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	/* dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); */
	/* dmu_tx_hold_bonus(tx, dzp->z_id); */
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, (char *)name);
#if 1 /* FIXME */
	if (dzp->z_pflags & ZFS_INHERIT_ACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE);
	}
#endif
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);	/* not consumed; avoid leak */
		dmu_tx_abort(tx);
		goto out;
	}

	zfs_mknode(dzp, &vattr, tx, cr, 0, &xzp, &acl_ids);

	/* ASSERT(xzp->z_id == zoid); */
	(void) zfs_link_create(dl, xzp, tx, ZNEW);
	zfs_log_create(zilog, tx, TX_CREATE, dzp, xzp, (char *)name,
	    NULL /* vsecp */, 0 /* acl_ids.z_fuidp */, &vattr);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	zfs_znode_wait_vnode(xzp);

	zfs_dirent_unlock(dl);
out:
	if (error == EEXIST)
		error = ENOATTR;
	if (xzp)
		*vpp = ZTOV(xzp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
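/*
 * Usage sketch (illustrative, not the shipped caller): a setxattr-style
 * path first obtains the file's hidden attribute directory and then
 * creates the named entry with zfs_obtain_xattr().  The helper name
 * sketch_create_xattr() is made up; the zfs_get_xattrdir() signature is
 * assumed to follow the illumos form (it may differ in this port), and
 * vnode_put() is the XNU KPI for dropping the iocount on the directory
 * vnode.
 */
static int
sketch_create_xattr(znode_t *zp, const char *name, cred_t *cr, vnode_t **vpp)
{
	vnode_t *xdvp = NULL;
	int error;

	/* Find (or create) the hidden attribute directory of zp. */
	error = zfs_get_xattrdir(zp, &xdvp, cr, CREATE_XATTR_DIR);
	if (error)
		return (error);

	/*
	 * Create the named entry.  With flag == ZNEW an existing entry
	 * fails with EEXIST, which zfs_obtain_xattr() maps to ENOATTR;
	 * passing flag == 0 would return the existing vnode instead.
	 */
	error = zfs_obtain_xattr(VTOZ(xdvp), name, 0644, cr, vpp, ZNEW);

	vnode_put(xdvp);
	return (error);
}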