/*
 * Link zp into dl.  Can only fail if zp has been unlinked.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
	znode_t *dzp = dl->dl_dzp;
#ifdef __APPLE__
	uint64_t value;
	/* OSX - don't access the vnode here since it might not be attached yet. */
	int zp_is_dir = S_ISDIR(zp->z_phys->zp_mode);
#else
	vnode_t *vp = ZTOV(zp);
	uint64_t value;
	int zp_is_dir = (vp->v_type == VDIR);
#endif
	int error;

	dmu_buf_will_dirty(zp->z_dbuf, tx);
	mutex_enter(&zp->z_lock);

	if (!(flag & ZRENAMING)) {
		if (zp->z_unlinked) {	/* no new links to unlinked zp */
			ASSERT(!(flag & (ZNEW | ZEXISTS)));
			mutex_exit(&zp->z_lock);
			return (ENOENT);
		}
		zp->z_phys->zp_links++;
	}
	zp->z_phys->zp_parent = dzp->z_id;	/* dzp is now zp's parent */

	if (!(flag & ZNEW))
		zfs_time_stamper_locked(zp, STATE_CHANGED, tx);
	mutex_exit(&zp->z_lock);

	dmu_buf_will_dirty(dzp->z_dbuf, tx);
	mutex_enter(&dzp->z_lock);
	dzp->z_phys->zp_size++;			/* one dirent added */
	dzp->z_phys->zp_links += zp_is_dir;	/* ".." link from zp */
	zfs_time_stamper_locked(dzp, CONTENT_MODIFIED, tx);
	mutex_exit(&dzp->z_lock);

	value = zfs_dirent(zp);
	error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
	    8, 1, &value, tx);
	ASSERT(error == 0);

#ifndef __APPLE__
	/* On Mac OS X, this is done up in VFS layer. */
	dnlc_update(ZTOV(dzp), dl->dl_name, vp);
#endif
	return (0);
}
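/*
 * Illustrative sketch (not part of the original source): zfs_link_create()
 * assumes the caller has already created and assigned a transaction that
 * holds the directory's zap object and both znodes' bonus buffers.  A
 * hypothetical caller, modeled loosely on the ZPL create path, might look
 * like the following; error handling is simplified and "dl" is assumed to
 * come from a prior zfs_dirent_lock() call.
 */
static int
zfs_link_create_example(zfs_dirlock_t *dl, znode_t *dzp, znode_t *zp)
{
	dmu_tx_t *tx;
	int error;

	tx = dmu_tx_create(dzp->z_zfsvfs->z_os);
	dmu_tx_hold_bonus(tx, zp->z_id);	/* zp's link count/parent */
	dmu_tx_hold_bonus(tx, dzp->z_id);	/* dzp's size/link count */
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dl->dl_name);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	error = zfs_link_create(dl, zp, tx, ZNEW);
	dmu_tx_commit(tx);
	return (error);
}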
/*
 * zfs_match_find() is used by zfs_dirent_lock() to perform zap lookups
 * of names after deciding which is the appropriate lookup interface.
 */
static int
zfs_match_find(zfs_sb_t *zsb, znode_t *dzp, char *name, boolean_t exact,
    boolean_t update, int *deflags, pathname_t *rpnp, uint64_t *zoid)
{
	boolean_t conflict = B_FALSE;
	int error;

	if (zsb->z_norm) {
		matchtype_t mt = MT_FIRST;
		size_t bufsz = 0;
		char *buf = NULL;

		if (rpnp) {
			buf = rpnp->pn_buf;
			bufsz = rpnp->pn_bufsize;
		}
		if (exact)
			mt = MT_EXACT;
		/*
		 * In the non-mixed case we only expect there would ever
		 * be one match, but we need to use the normalizing lookup.
		 */
		error = zap_lookup_norm(zsb->z_os, dzp->z_id, name, 8, 1,
		    zoid, mt, buf, bufsz, &conflict);
	} else {
		error = zap_lookup(zsb->z_os, dzp->z_id, name, 8, 1, zoid);
	}

	/*
	 * Allow multiple entries provided the first entry is
	 * the object id.  Non-zpl consumers may safely make
	 * use of the additional space.
	 *
	 * XXX: This should be a feature flag for compatibility
	 */
	if (error == EOVERFLOW)
		error = 0;

	if (zsb->z_norm && !error && deflags)
		*deflags = conflict ? ED_CASE_CONFLICT : 0;

	*zoid = ZFS_DIRENT_OBJ(*zoid);

#ifdef HAVE_DNLC
	if (error == ENOENT && update)
		dnlc_update(ZTOI(dzp), name, DNLC_NO_VNODE);
#endif /* HAVE_DNLC */

	return (error);
}
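/*
 * Illustrative, self-contained sketch (not part of the original source) of
 * the directory entry encoding that ZFS_DIRENT_OBJ() above decodes: a zap
 * dirent value is a single uint64_t holding the object number in the low
 * 48 bits and the file type in the top 4 bits.  Buildable in user space;
 * the EX_ macro names are hypothetical stand-ins for the kernel macros.
 */
#include <stdint.h>
#include <stdio.h>

#define	EX_DIRENT_OBJ(de)	((de) & ((1ULL << 48) - 1))
#define	EX_DIRENT_TYPE(de)	((de) >> 60)

int
main(void)
{
	uint64_t obj = 12345;	/* hypothetical object number */
	uint64_t type = 4;	/* DT_DIR in the dirent type encoding */
	uint64_t de = (type << 60) | obj;

	printf("obj=%llu type=%llu\n",
	    (unsigned long long)EX_DIRENT_OBJ(de),
	    (unsigned long long)EX_DIRENT_TYPE(de));
	return (0);
}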
/*
 * Link zp into dl.  Can only fail if zp has been unlinked.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
	znode_t *dzp = dl->dl_dzp;
	vnode_t *vp = ZTOV(zp);
	uint64_t value;
	int zp_is_dir = (vp->v_type == VDIR);
	int error;

	dmu_buf_will_dirty(zp->z_dbuf, tx);
	mutex_enter(&zp->z_lock);

	if (!(flag & ZRENAMING)) {
		if (zp->z_unlinked) {	/* no new links to unlinked zp */
			ASSERT(!(flag & (ZNEW | ZEXISTS)));
			mutex_exit(&zp->z_lock);
			return (ENOENT);
		}
		zp->z_phys->zp_links++;
	}
	zp->z_phys->zp_parent = dzp->z_id;	/* dzp is now zp's parent */

	if (!(flag & ZNEW))
		zfs_time_stamper_locked(zp, STATE_CHANGED, tx);
	mutex_exit(&zp->z_lock);

	dmu_buf_will_dirty(dzp->z_dbuf, tx);
	mutex_enter(&dzp->z_lock);
	dzp->z_phys->zp_size++;			/* one dirent added */
	dzp->z_phys->zp_links += zp_is_dir;	/* ".." link from zp */
	zfs_time_stamper_locked(dzp, CONTENT_MODIFIED, tx);
	mutex_exit(&dzp->z_lock);

	value = zfs_dirent(zp);
	error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
	    8, 1, &value, tx);
	ASSERT(error == 0);

	dnlc_update(ZTOV(dzp), dl->dl_name, vp);

	return (0);
}
/*
 * zfs_match_find() is used by zfs_dirent_lock() to perform zap lookups
 * of names after deciding which is the appropriate lookup interface.
 */
static int
zfs_match_find(zfs_sb_t *zsb, znode_t *dzp, char *name, boolean_t exact,
    boolean_t update, int *deflags, pathname_t *rpnp, uint64_t *zoid)
{
	int error;

	if (zsb->z_norm) {
		matchtype_t mt = MT_FIRST;
		boolean_t conflict = B_FALSE;
		size_t bufsz = 0;
		char *buf = NULL;

		if (rpnp) {
			buf = rpnp->pn_buf;
			bufsz = rpnp->pn_bufsize;
		}
		if (exact)
			mt = MT_EXACT;
		/*
		 * In the non-mixed case we only expect there would ever
		 * be one match, but we need to use the normalizing lookup.
		 */
		error = zap_lookup_norm(zsb->z_os, dzp->z_id, name, 8, 1,
		    zoid, mt, buf, bufsz, &conflict);
		if (!error && deflags)
			*deflags = conflict ? ED_CASE_CONFLICT : 0;
	} else {
		error = zap_lookup(zsb->z_os, dzp->z_id, name, 8, 1, zoid);
	}
	*zoid = ZFS_DIRENT_OBJ(*zoid);

#ifdef HAVE_DNLC
	if (error == ENOENT && update)
		dnlc_update(ZTOI(dzp), name, DNLC_NO_VNODE);
#endif /* HAVE_DNLC */

	return (error);
}
/*
 * Link zp into dl.  Can only fail if zp has been unlinked.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
	znode_t *dzp = dl->dl_dzp;
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	vnode_t *vp = ZTOV(zp);
	uint64_t value;
	int zp_is_dir = (vp->v_type == VDIR);
	sa_bulk_attr_t bulk[5];
	uint64_t mtime[2], ctime[2];
	int count = 0;
	int error;

	mutex_enter(&zp->z_lock);

	if (!(flag & ZRENAMING)) {
		if (zp->z_unlinked) {	/* no new links to unlinked zp */
			ASSERT(!(flag & (ZNEW | ZEXISTS)));
			mutex_exit(&zp->z_lock);
			return (ENOENT);
		}
		zp->z_links++;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
		    &zp->z_links, sizeof (zp->z_links));
	}
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
	    &dzp->z_id, sizeof (dzp->z_id));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

	if (!(flag & ZNEW)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
		    B_TRUE);
	}
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	mutex_exit(&zp->z_lock);

	mutex_enter(&dzp->z_lock);
	dzp->z_size++;
	dzp->z_links += zp_is_dir;
	count = 0;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &dzp->z_size, sizeof (dzp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &dzp->z_links, sizeof (dzp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    mtime, sizeof (mtime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    ctime, sizeof (ctime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &dzp->z_pflags, sizeof (dzp->z_pflags));
	zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);
	mutex_exit(&dzp->z_lock);

	value = zfs_dirent(zp, zp->z_mode);
	error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
	    8, 1, &value, tx);
	ASSERT(error == 0);

	dnlc_update(ZTOV(dzp), dl->dl_name, vp);

	return (0);
}
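/*
 * Illustrative note (not part of the original source): the mtime[2] and
 * ctime[2] arrays above use the on-disk ZPL time representation, a pair of
 * uint64_t values holding seconds and nanoseconds.  A minimal user-space
 * sketch of that encoding (function name is hypothetical):
 */
#include <stdint.h>
#include <time.h>

void
ex_zfs_time_encode(const struct timespec *tp, uint64_t stmp[2])
{
	stmp[0] = (uint64_t)tp->tv_sec;		/* seconds since epoch */
	stmp[1] = (uint64_t)tp->tv_nsec;	/* nanoseconds */
}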
/*
 * Lock a directory entry.  A dirlock on <dzp, name> protects that name
 * in dzp's directory zap object.  As long as you hold a dirlock, you can
 * assume two things: (1) dzp cannot be reaped, and (2) no other thread
 * can change the zap entry for (i.e. link or unlink) this name.
 *
 * Input arguments:
 *	dzp	- znode for directory
 *	name	- name of entry to lock
 *	flag	- ZNEW: if the entry already exists, fail with EEXIST.
 *		  ZEXISTS: if the entry does not exist, fail with ENOENT.
 *		  ZSHARED: allow concurrent access with other ZSHARED callers.
 *		  ZXATTR: we want dzp's xattr directory
 *		  ZCILOOK: On a mixed sensitivity file system,
 *			   this lookup should be case-insensitive.
 *		  ZCIEXACT: On a purely case-insensitive file system,
 *			    this lookup should be case-sensitive.
 *		  ZRENAMING: we are locking for renaming, force narrow locks
 *		  ZHAVELOCK: Don't grab the z_name_lock for this call. The
 *			     current thread already holds it.
 *
 * Output arguments:
 *	zpp	- pointer to the znode for the entry (NULL if there isn't one)
 *	dlpp	- pointer to the dirlock for this entry (NULL on error)
 *	direntflags - (case-insensitive lookup only)
 *		flags if multiple case-sensitive matches exist in directory
 *	realpnp	- (case-insensitive lookup only)
 *		actual name matched within the directory
 *
 * Return value: 0 on success or errno on failure.
 *
 * NOTE: Always checks for, and rejects, '.' and '..'.
 * NOTE: For case-insensitive file systems we take wide locks (see below),
 *	 but return znode pointers to a single match.
 */
int
zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
    int flag, int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zfs_dirlock_t	*dl;
	boolean_t	update;
	boolean_t	exact;
	uint64_t	zoid;
	vnode_t		*vp = NULL;
	int		error = 0;
	int		cmpflags;

	*zpp = NULL;
	*dlpp = NULL;

	/*
	 * Verify that we are not trying to lock '.', '..', or '.zfs'
	 */
	if ((name[0] == '.' &&
	    (name[1] == '\0' || (name[1] == '.' && name[2] == '\0'))) ||
	    (zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0))
		return (EEXIST);

	/*
	 * Case sensitivity and normalization preferences are set when
	 * the file system is created.  These are stored in the
	 * zfsvfs->z_case and zfsvfs->z_norm fields.  These choices
	 * affect what vnodes can be cached in the DNLC, how we
	 * perform zap lookups, and the "width" of our dirlocks.
	 *
	 * A normal dirlock locks a single name.  Note that with
	 * normalization a name can be composed multiple ways, but
	 * when normalized, these names all compare equal.  A wide
	 * dirlock locks multiple names.  We need these when the file
	 * system is supporting mixed-mode access.  It is sometimes
	 * necessary to lock all case permutations of file name at
	 * once so that simultaneous case-insensitive/case-sensitive
	 * behaves as rationally as possible.
	 */

	/*
	 * Decide if exact matches should be requested when performing
	 * a zap lookup on file systems supporting case-insensitive
	 * access.
	 */
	exact =
	    ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE) && (flag & ZCIEXACT)) ||
	    ((zfsvfs->z_case == ZFS_CASE_MIXED) && !(flag & ZCILOOK));

	/*
	 * Only look in or update the DNLC if we are looking for the
	 * name on a file system that does not require normalization
	 * or case folding.  We can also look there if we happen to be
	 * on a non-normalizing, mixed sensitivity file system IF we
	 * are looking for the exact name.
	 *
	 * Maybe can add TO-UPPERed version of name to dnlc in ci-only
	 * case for performance improvement?
	 */
	update = !zfsvfs->z_norm ||
	    ((zfsvfs->z_case == ZFS_CASE_MIXED) &&
	    !(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK));

	/*
	 * ZRENAMING indicates we are in a situation where we should
	 * take narrow locks regardless of the file system's
	 * preferences for normalizing and case folding.  This will
	 * prevent us from deadlocking trying to grab the same wide
	 * lock twice if the two names happen to be case-insensitive
	 * matches.
	 */
	if (flag & ZRENAMING)
		cmpflags = 0;
	else
		cmpflags = zfsvfs->z_norm;

	/*
	 * Wait until there are no locks on this name.
	 *
	 * Don't grab the lock if it is already held.  However, we cannot
	 * have both ZSHARED and ZHAVELOCK together.
	 */
	ASSERT(!(flag & ZSHARED) || !(flag & ZHAVELOCK));
	if (!(flag & ZHAVELOCK))
		rw_enter(&dzp->z_name_lock, RW_READER);

	mutex_enter(&dzp->z_lock);
	for (;;) {
		if (dzp->z_unlinked) {
			mutex_exit(&dzp->z_lock);
			if (!(flag & ZHAVELOCK))
				rw_exit(&dzp->z_name_lock);
			return (ENOENT);
		}
		for (dl = dzp->z_dirlocks; dl != NULL; dl = dl->dl_next) {
			if ((u8_strcmp(name, dl->dl_name, 0, cmpflags,
			    U8_UNICODE_LATEST, &error) == 0) || error != 0)
				break;
		}
		if (error != 0) {
			mutex_exit(&dzp->z_lock);
			if (!(flag & ZHAVELOCK))
				rw_exit(&dzp->z_name_lock);
			return (ENOENT);
		}
		if (dl == NULL) {
			/*
			 * Allocate a new dirlock and add it to the list.
			 */
			dl = kmem_alloc(sizeof (zfs_dirlock_t), KM_SLEEP);
			cv_init(&dl->dl_cv, NULL, CV_DEFAULT, NULL);
			dl->dl_name = name;
			dl->dl_sharecnt = 0;
			dl->dl_namelock = 0;
			dl->dl_namesize = 0;
			dl->dl_dzp = dzp;
			dl->dl_next = dzp->z_dirlocks;
			dzp->z_dirlocks = dl;
			break;
		}
		if ((flag & ZSHARED) && dl->dl_sharecnt != 0)
			break;
		cv_wait(&dl->dl_cv, &dzp->z_lock);
	}

	/*
	 * If the caller already held the z_name_lock (ZHAVELOCK), record
	 * that in the dirlock so zfs_dirent_unlock() will not drop it.
	 */
	if (flag & ZHAVELOCK)
		dl->dl_namelock = 1;

	if ((flag & ZSHARED) && ++dl->dl_sharecnt > 1 && dl->dl_namesize == 0) {
		/*
		 * We're the second shared reference to dl.  Make a copy of
		 * dl_name in case the first thread goes away before we do.
		 * Note that we initialize the new name before storing its
		 * pointer into dl_name, because the first thread may load
		 * dl->dl_name at any time.  He'll either see the old value,
		 * which is his, or the new shared copy; either is OK.
		 */
		dl->dl_namesize = strlen(dl->dl_name) + 1;
		name = kmem_alloc(dl->dl_namesize, KM_SLEEP);
		bcopy(dl->dl_name, name, dl->dl_namesize);
		dl->dl_name = name;
	}

	mutex_exit(&dzp->z_lock);

	/*
	 * We have a dirlock on the name.  (Note that it is the dirlock,
	 * not the dzp's z_lock, that protects the name in the zap object.)
	 * See if there's an object by this name; if so, put a hold on it.
	 */
	if (flag & ZXATTR) {
		error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &zoid,
		    sizeof (zoid));
		if (error == 0)
			error = (zoid == 0 ? ENOENT : 0);
	} else {
		if (update)
			vp = dnlc_lookup(ZTOV(dzp), name);
		if (vp == DNLC_NO_VNODE) {
			VN_RELE(vp);
			error = ENOENT;
		} else if (vp) {
			if (flag & ZNEW) {
				zfs_dirent_unlock(dl);
				VN_RELE(vp);
				return (EEXIST);
			}
			*dlpp = dl;
			*zpp = VTOZ(vp);
			return (0);
		} else {
			error = zfs_match_find(zfsvfs, dzp, name, exact,
			    update, direntflags, realpnp, &zoid);
		}
	}
	if (error) {
		if (error != ENOENT || (flag & ZEXISTS)) {
			zfs_dirent_unlock(dl);
			return (error);
		}
	} else {
		if (flag & ZNEW) {
			zfs_dirent_unlock(dl);
			return (EEXIST);
		}
		error = zfs_zget(zfsvfs, zoid, zpp);
		if (error) {
			zfs_dirent_unlock(dl);
			return (error);
		}
		if (!(flag & ZXATTR) && update)
			dnlc_update(ZTOV(dzp), name, ZTOV(*zpp));
	}

	*dlpp = dl;

	return (0);
}
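/*
 * Illustrative sketch (not part of the original source): a typical lookup
 * caller pairs zfs_dirent_lock() with zfs_dirent_unlock(), using ZEXISTS
 * to fail fast when the name is absent.  The function name is hypothetical
 * and error handling is simplified; the vnode reference obtained via
 * zfs_zget() remains held for the caller to release.
 */
static int
zfs_dirlook_example(znode_t *dzp, char *name, vnode_t **vpp)
{
	zfs_dirlock_t *dl;
	znode_t *zp;
	int error;

	error = zfs_dirent_lock(&dl, dzp, name, &zp, ZEXISTS, NULL, NULL);
	if (error)
		return (error);
	*vpp = ZTOV(zp);	/* reference held by zfs_zget() */
	zfs_dirent_unlock(dl);
	return (0);
}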
/*
 * Lock a directory entry.  A dirlock on <dzp, name> protects that name
 * in dzp's directory zap object.  As long as you hold a dirlock, you can
 * assume two things: (1) dzp cannot be reaped, and (2) no other thread
 * can change the zap entry for (i.e. link or unlink) this name.
 *
 * Input arguments:
 *	dzp	- znode for directory
 *	name	- name of entry to lock
 *	flag	- ZNEW: if the entry already exists, fail with EEXIST.
 *		  ZEXISTS: if the entry does not exist, fail with ENOENT.
 *		  ZSHARED: allow concurrent access with other ZSHARED callers.
 *		  ZXATTR: we want dzp's xattr directory
 *
 * Output arguments:
 *	zpp	- pointer to the znode for the entry (NULL if there isn't one)
 *	dlpp	- pointer to the dirlock for this entry (NULL on error)
 *
 * Return value: 0 on success or errno on failure.
 *
 * NOTE: Always checks for, and rejects, '.' and '..'.
 */
int
zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
    int flag)
{
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zfs_dirlock_t	*dl;
	uint64_t	zoid;
	int		error;
	vnode_t		*vp;

	*zpp = NULL;
	*dlpp = NULL;

	/*
	 * Verify that we are not trying to lock '.', '..', or '.zfs'
	 */
	if (name[0] == '.' &&
	    (name[1] == '\0' || (name[1] == '.' && name[2] == '\0')) ||
	    zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0)
		return (EEXIST);

	/*
	 * Wait until there are no locks on this name.
	 */
	mutex_enter(&dzp->z_lock);
	for (;;) {
		if (dzp->z_reap) {
			mutex_exit(&dzp->z_lock);
			return (ENOENT);
		}
		for (dl = dzp->z_dirlocks; dl != NULL; dl = dl->dl_next)
			if (strcmp(name, dl->dl_name) == 0)
				break;
		if (dl == NULL) {
			/*
			 * Allocate a new dirlock and add it to the list.
			 */
			dl = kmem_alloc(sizeof (zfs_dirlock_t), KM_SLEEP);
			cv_init(&dl->dl_cv, NULL, CV_DEFAULT, NULL);
			dl->dl_name = name;
			dl->dl_sharecnt = 0;
			dl->dl_namesize = 0;
			dl->dl_dzp = dzp;
			dl->dl_next = dzp->z_dirlocks;
			dzp->z_dirlocks = dl;
			break;
		}
		if ((flag & ZSHARED) && dl->dl_sharecnt != 0)
			break;
		cv_wait(&dl->dl_cv, &dzp->z_lock);
	}

	if ((flag & ZSHARED) && ++dl->dl_sharecnt > 1 && dl->dl_namesize == 0) {
		/*
		 * We're the second shared reference to dl.  Make a copy of
		 * dl_name in case the first thread goes away before we do.
		 * Note that we initialize the new name before storing its
		 * pointer into dl_name, because the first thread may load
		 * dl->dl_name at any time.  He'll either see the old value,
		 * which is his, or the new shared copy; either is OK.
		 */
		dl->dl_namesize = strlen(dl->dl_name) + 1;
		name = kmem_alloc(dl->dl_namesize, KM_SLEEP);
		bcopy(dl->dl_name, name, dl->dl_namesize);
		dl->dl_name = name;
	}

	mutex_exit(&dzp->z_lock);

	/*
	 * We have a dirlock on the name.  (Note that it is the dirlock,
	 * not the dzp's z_lock, that protects the name in the zap object.)
	 * See if there's an object by this name; if so, put a hold on it.
	 */
	if (flag & ZXATTR) {
		zoid = dzp->z_phys->zp_xattr;
		error = (zoid == 0 ? ENOENT : 0);
	} else {
		vp = dnlc_lookup(ZTOV(dzp), name);
		if (vp == DNLC_NO_VNODE) {
			VN_RELE(vp);
			error = ENOENT;
		} else if (vp) {
			if (flag & ZNEW) {
				zfs_dirent_unlock(dl);
				VN_RELE(vp);
				return (EEXIST);
			}
			*dlpp = dl;
			*zpp = VTOZ(vp);
			return (0);
		} else {
			error = zap_lookup(zfsvfs->z_os, dzp->z_id, name,
			    8, 1, &zoid);
			if (error == ENOENT)
				dnlc_update(ZTOV(dzp), name, DNLC_NO_VNODE);
		}
	}
	if (error) {
		if (error != ENOENT || (flag & ZEXISTS)) {
			zfs_dirent_unlock(dl);
			return (error);
		}
	} else {
		if (flag & ZNEW) {
			zfs_dirent_unlock(dl);
			return (EEXIST);
		}
		error = zfs_zget(zfsvfs, zoid, zpp);
		if (error) {
			zfs_dirent_unlock(dl);
			return (error);
		}
		if (!(flag & ZXATTR))
			dnlc_update(ZTOV(dzp), name, ZTOV(*zpp));
	}

	*dlpp = dl;

	return (0);
}
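/*
 * Illustrative sketch (not part of the original source) of the DNLC
 * negative-caching convention used above: dnlc_update() with the
 * DNLC_NO_VNODE sentinel records "name does not exist", and a later
 * dnlc_lookup() hit on that sentinel (returned held, hence the VN_RELE)
 * short-circuits the zap lookup with ENOENT.  The function name and the
 * -1 "not cached" convention are hypothetical.
 */
static int
dnlc_neg_lookup_example(vnode_t *dvp, char *name)
{
	vnode_t *vp = dnlc_lookup(dvp, name);

	if (vp == DNLC_NO_VNODE) {	/* cached negative entry */
		VN_RELE(vp);
		return (ENOENT);
	}
	if (vp != NULL) {		/* cached positive entry, held */
		VN_RELE(vp);
		return (0);
	}
	return (-1);			/* not cached; consult the zap */
}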
#ifdef __APPLE__
int
zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, struct componentname *cnp,
    znode_t **zpp, int flag)
#else
int
zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
    int flag)
#endif
{
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zfs_dirlock_t	*dl;
	uint64_t	zoid;
	int		error;
	vnode_t		*vp;
#ifdef __APPLE__
	char *name;
	u_int8_t *nfc_name = NULL;	/* NFC form of name */
	int nfc_namesize = 0;
#endif

	*zpp = NULL;
	*dlpp = NULL;

#ifdef __APPLE__
	/* Note: cnp will be NULL for ZXATTR case */
	name = cnp ? cnp->cn_nameptr : "";
	if (cnp)
		ASSERT(name[cnp->cn_namelen] == '\0');
#endif
	/*
	 * Verify that we are not trying to lock '.', '..', or '.zfs'
	 */
	if ((name[0] == '.') &&
	    ((name[1] == '\0') || ((name[1] == '.') && (name[2] == '\0'))) ||
	    zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0)
		return (EEXIST);

#ifdef __APPLE__
	/*
	 * Mac OS X: store non-ascii names in UTF-8 NFC (pre-composed) on disk.
	 *
	 * The NFC name ptr is stored in dl->dl_name (allocated here)
	 * and it is freed by zfs_dirent_unlock (since dl_namesize != 0).
	 *
	 * Since NFC size will not expand, we can allocate the same sized
	 * buffer.
	 */
	if (!is_ascii_str(name)) {
		size_t outlen;

		nfc_namesize = strlen(name) + 1;
		nfc_name = kmem_alloc(nfc_namesize, KM_SLEEP);

		if (utf8_normalizestr((const u_int8_t *)name, nfc_namesize,
		    nfc_name, &outlen, nfc_namesize, UTF_PRECOMPOSED) == 0) {
			/* Normalization succeeded, switch to NFC name. */
			name = (char *)nfc_name;
		} else {
			/* Normalization failed, just use input name as-is. */
			kmem_free(nfc_name, nfc_namesize);
			nfc_name = NULL;
		}
	}
#endif
	/*
	 * Wait until there are no locks on this name.
	 */
	rw_enter(&dzp->z_name_lock, RW_READER);
	mutex_enter(&dzp->z_lock);
	for (;;) {
		if (dzp->z_unlinked) {
			mutex_exit(&dzp->z_lock);
			rw_exit(&dzp->z_name_lock);
#ifdef __APPLE__
			/* Release any unused NFC name before returning */
			if (nfc_name) {
				kmem_free(nfc_name, nfc_namesize);
			}
#endif
			return (ENOENT);
		}
		for (dl = dzp->z_dirlocks; dl != NULL; dl = dl->dl_next)
			if (strcmp(name, dl->dl_name) == 0)
				break;
		if (dl == NULL) {
			/*
			 * Allocate a new dirlock and add it to the list.
			 */
			dl = kmem_alloc(sizeof (zfs_dirlock_t), KM_SLEEP);
			cv_init(&dl->dl_cv, NULL, CV_DEFAULT, NULL);
			dl->dl_name = name;
			dl->dl_sharecnt = 0;
			dl->dl_namesize = 0;
			dl->dl_dzp = dzp;
			dl->dl_next = dzp->z_dirlocks;
			dzp->z_dirlocks = dl;
#ifdef __APPLE__
			/*
			 * Keep the NFC name around in dir lock by tagging it
			 * (setting nfc_namesize).
			 */
			if (nfc_name) {
				dl->dl_namesize = nfc_namesize;
				nfc_name = NULL; /* now part of the dir lock */
			}
#endif
			break;
		}
		if ((flag & ZSHARED) && dl->dl_sharecnt != 0)
			break;
		cv_wait(&dl->dl_cv, &dzp->z_lock);
		dl = NULL;
	}
#ifdef __APPLE__
	/*
	 * Release any unused NFC name (i.e. if we found a pre-existing
	 * lock entry).
	 */
	if (nfc_name) {
		kmem_free(nfc_name, nfc_namesize);
		nfc_name = NULL;
	}
#endif
	if ((flag & ZSHARED) && ++dl->dl_sharecnt > 1 && dl->dl_namesize == 0) {
		/*
		 * We're the second shared reference to dl.  Make a copy of
		 * dl_name in case the first thread goes away before we do.
		 * Note that we initialize the new name before storing its
		 * pointer into dl_name, because the first thread may load
		 * dl->dl_name at any time.  He'll either see the old value,
		 * which is his, or the new shared copy; either is OK.
		 */
		dl->dl_namesize = strlen(dl->dl_name) + 1;
		name = kmem_alloc(dl->dl_namesize, KM_SLEEP);
		bcopy(dl->dl_name, name, dl->dl_namesize);
		dl->dl_name = name;
	}

	mutex_exit(&dzp->z_lock);

	/*
	 * We have a dirlock on the name.  (Note that it is the dirlock,
	 * not the dzp's z_lock, that protects the name in the zap object.)
	 * See if there's an object by this name; if so, put a hold on it.
	 */
	if (flag & ZXATTR) {
		error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &zoid,
		    sizeof (zoid));
		if (error == 0)
			error = (zoid == 0 ? ENOENT : 0);
	} else {
#ifdef __APPLE__
		/*
		 * Lookup an entry in the vnode name cache
		 *
		 * If the lookup succeeds, the vnode is returned in *vpp,
		 * and a status of -1 is returned.
		 *
		 * If the lookup determines that the name does not exist
		 * (negative caching), a status of ENOENT is returned.
		 *
		 * If the lookup fails, a status of zero is returned.
		 */
		switch (cache_lookup(ZTOV(dzp), &vp, cnp)) {
		case -1:
			break;
		case ENOENT:
			vp = DNLC_NO_VNODE;
			break;
		default:
			vp = NULLVP;
		}
#else
		vp = dnlc_lookup(ZTOV(dzp), name);
#endif /* __APPLE__ */
		if (vp == DNLC_NO_VNODE) {
			VN_RELE(vp);
			error = ENOENT;
		} else if (vp) {
			if (flag & ZNEW) {
				zfs_dirent_unlock(dl);
				VN_RELE(vp);
				return (EEXIST);
			}
			*dlpp = dl;
			*zpp = VTOZ(vp);
			return (0);
		} else {
			error = zap_lookup(zfsvfs->z_os, dzp->z_id, name,
			    8, 1, &zoid);
			zoid = ZFS_DIRENT_OBJ(zoid);
			if (error == ENOENT)
#ifdef __APPLE__
			{
				/*
				 * Add a negative entry into the VFS name cache
				 */
				if ((flag & ZNEW) == 0 &&
				    (dzp->z_pflags & ZFS_XATTR) == 0 &&
				    (cnp) && (cnp->cn_flags & MAKEENTRY) &&
				    (cnp->cn_nameiop != CREATE) &&
				    (cnp->cn_nameiop != RENAME)) {
					cache_enter(ZTOV(dzp), NULLVP, cnp);
				}
			}
#else
				dnlc_update(ZTOV(dzp), name, DNLC_NO_VNODE);
#endif /* __APPLE__ */
		}
	}
	if (error) {
		if (error != ENOENT || (flag & ZEXISTS)) {
			zfs_dirent_unlock(dl);
			return (error);
		}
	} else {
		if (flag & ZNEW) {
			zfs_dirent_unlock(dl);
			return (EEXIST);
		}
		//error = zfs_zget_sans_vnode(zfsvfs, zoid, zpp);
		error = zfs_zget(zfsvfs, zoid, zpp);
		if (error) {
			zfs_dirent_unlock(dl);
			return (error);
		} else {
			// Should this be here?
			//printf("zfs_dir attach 1\n");
			//zfs_attach_vnode(*zpp);
		}
		if (!(flag & ZXATTR))
#ifdef __APPLE__
			if (cnp && cnp->cn_flags & MAKEENTRY)
				cache_enter(ZTOV(dzp), ZTOV(*zpp), cnp);
#else
			dnlc_update(ZTOV(dzp), name, ZTOV(*zpp));
#endif /* __APPLE__ */
	}

	*dlpp = dl;

	return (0);
}
/*
 * Link zp into dl.  Can only fail if zp has been unlinked.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
	znode_t *dzp = dl->dl_dzp;
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
#ifdef __APPLE__
	uint64_t value;
	/* OSX - don't access the vnode here since it might not be attached yet. */
	//int zp_is_dir = S_ISDIR(zp->z_phys->zp_mode);
	int zp_is_dir = S_ISDIR(zp->z_mode);
#else
	vnode_t *vp = ZTOV(zp);
	uint64_t value;
	int zp_is_dir = (vp->v_type == VDIR);
#endif
	sa_bulk_attr_t bulk[5];
	uint64_t mtime[2], ctime[2];
	int count = 0;
	int error;

	mutex_enter(&zp->z_lock);

	if (!(flag & ZRENAMING)) {
		if (zp->z_unlinked) {	/* no new links to unlinked zp */
			ASSERT(!(flag & (ZNEW | ZEXISTS)));
			mutex_exit(&zp->z_lock);
			return (ENOENT);
		}
		zp->z_links++;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
		    &zp->z_links, sizeof (zp->z_links));
	}
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
	    &dzp->z_id, sizeof (dzp->z_id));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

	if (!(flag & ZNEW)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
		    B_TRUE);
	}
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

	// Needed?
#ifdef __APPLE__
	zp->z_parent = dzp->z_id;
#endif

	mutex_exit(&zp->z_lock);

	mutex_enter(&dzp->z_lock);
	dzp->z_size++;
	dzp->z_links += zp_is_dir;
	count = 0;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &dzp->z_size, sizeof (dzp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &dzp->z_links, sizeof (dzp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    mtime, sizeof (mtime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    ctime, sizeof (ctime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &dzp->z_pflags, sizeof (dzp->z_pflags));
	zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
	mutex_exit(&dzp->z_lock);

	value = zfs_dirent(zp, zp->z_mode);
	error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
	    8, 1, &value, tx);
	ASSERT(error == 0);

#ifndef __APPLE__
	/* On Mac OS X, this is done up in VFS layer. */
	dnlc_update(ZTOV(dzp), dl->dl_name, vp);
#endif
	return (0);
}