/**
 * @brief Self-test for the NFSv4 ACL cache.
 *
 * Builds two identical 3-ACE acl data sets and passes both to
 * nfs4_acl_new_entry(); the second call is expected to hit the cache and
 * return the same acl with a bumped reference count (the logged "ref"
 * values show this).  Both references are then dropped with
 * nfs4_acl_release_entry().
 *
 * NOTE(review): acl->lock is taken and acl->ref is logged *after* the
 * first nfs4_acl_release_entry() call.  That is only safe while the
 * second reference is still held — confirm the release path cannot free
 * the entry while a reference remains.
 */
static void nfs4_acls_test(void)
{
	int i = 0;
	fsal_acl_data_t acldata, acldata2;
	fsal_ace_t *ace = NULL;
	fsal_acl_t *acl = NULL;
	fsal_acl_status_t status;

	acldata.naces = 3;
	acldata.aces = nfs4_ace_alloc(3);
	LogDebug(COMPONENT_NFS_V4_ACL, "acldata.aces = %p", acldata.aces);

	ace = acldata.aces;

	/* Fill each ACE with distinguishable per-index values. */
	for (i = 0; i < 3; i++) {
		ace->type = i;
		ace->perm = i;
		ace->flag = i;
		ace->who.uid = i;
		ace++;
	}

	acl = nfs4_acl_new_entry(&acldata, &status);
	PTHREAD_RWLOCK_rdlock(&acl->lock);
	LogDebug(COMPONENT_NFS_V4_ACL, "acl = %p, ref = %u, status = %u", acl,
		 acl->ref, status);
	PTHREAD_RWLOCK_unlock(&acl->lock);

	/* Second, identical data set: should produce a cache hit. */
	acldata2.naces = 3;
	acldata2.aces = nfs4_ace_alloc(3);
	LogDebug(COMPONENT_NFS_V4_ACL, "acldata2.aces = %p", acldata2.aces);

	ace = acldata2.aces;

	for (i = 0; i < 3; i++) {
		ace->type = i;
		ace->perm = i;
		ace->flag = i;
		ace->who.uid = i;
		ace++;
	}

	acl = nfs4_acl_new_entry(&acldata2, &status);
	PTHREAD_RWLOCK_rdlock(&acl->lock);
	LogDebug(COMPONENT_NFS_V4_ACL, "re-access: acl = %p, ref = %u, status = %u",
		 acl, acl->ref, status);
	PTHREAD_RWLOCK_unlock(&acl->lock);

	/* Drop the first reference and log the remaining refcount. */
	nfs4_acl_release_entry(acl, &status);
	PTHREAD_RWLOCK_rdlock(&acl->lock);
	LogDebug(COMPONENT_NFS_V4_ACL, "release: acl = %p, ref = %u, status = %u",
		 acl, acl->ref, status);
	PTHREAD_RWLOCK_unlock(&acl->lock);

	/* Drop the second (final) reference. */
	nfs4_acl_release_entry(acl, &status);
}
/**
 * @brief Commit (sync) cached file data to stable storage.
 *
 * Verifies the handle belongs to this FSAL, then fsync()s the cached
 * file descriptor under the object's read lock (the lock only protects
 * the descriptor from being closed; fsync itself needs no exclusivity).
 *
 * @param[in] obj_hdl  File to commit
 * @param[in] offset   Start of range to commit (unused: whole file synced)
 * @param[in] len      Length of range to commit (unused: whole file synced)
 *
 * @return FSAL status; errno-derived error if fsync fails.
 */
fsal_status_t vfs_commit(struct fsal_obj_handle *obj_hdl,	/* sync */
			 off_t offset, size_t len)
{
	struct vfs_fsal_obj_handle *hdl =
	    container_of(obj_hdl, struct vfs_fsal_obj_handle, obj_handle);
	fsal_errors_t err = ERR_FSAL_NO_ERROR;
	int rc = 0;

	/* Refuse cross-FSAL operations (handle migrated to another FSAL). */
	if (obj_hdl->fsal != obj_hdl->fs->fsal) {
		LogDebug(COMPONENT_FSAL,
			 "FSAL %s operation for handle belonging to FSAL %s, return EXDEV",
			 obj_hdl->fsal->name, obj_hdl->fs->fsal->name);
		rc = EXDEV;
		return fsalstat(posix2fsal_error(rc), rc);
	}

	/* Take read lock on object to protect file descriptor. */
	PTHREAD_RWLOCK_rdlock(&obj_hdl->lock);

	assert(hdl->u.file.fd >= 0 &&
	       hdl->u.file.openflags != FSAL_O_CLOSED);

	if (fsync(hdl->u.file.fd) == -1) {
		rc = errno;
		err = posix2fsal_error(rc);
	}

	PTHREAD_RWLOCK_unlock(&obj_hdl->lock);

	return fsalstat(err, rc);
}
/**
 * @brief Get supplementary groups given uid
 *
 * Fast path: a read-locked lookup that finds a fresh cache entry.  On a
 * miss or an expired entry, the read lock is dropped, the stale entry (if
 * any) is removed under the write lock, a fresh entry is allocated outside
 * any lock (it may block on NSS/getpwuid), then inserted and re-looked-up
 * under the write lock.
 *
 * NOTE(review): between dropping the read lock and taking the write lock,
 * another thread may race through the same path; the final re-lookup under
 * the write lock is what makes the result consistent.
 *
 * @param[in]  uid   The uid of the user
 * @param[out] gdata Receives a held reference to the group data on success
 *
 * @return true if successful, false otherwise
 */
bool uid2grp(uid_t uid, struct group_data **gdata)
{
	bool success = false;

	PTHREAD_RWLOCK_rdlock(&uid2grp_user_lock);
	success = uid2grp_lookup_by_uid(uid, gdata);

	/* Handle common case first */
	if (success && !uid2grp_expired(*gdata)) {
		/* Take a reference while still holding the lock so the
		 * entry cannot vanish after we release it. */
		uid2grp_hold_group_data(*gdata);
		PTHREAD_RWLOCK_unlock(&uid2grp_user_lock);
		return success;
	}
	PTHREAD_RWLOCK_unlock(&uid2grp_user_lock);

	if (success) {
		/* Cache entry is expired */
		PTHREAD_RWLOCK_wrlock(&uid2grp_user_lock);
		uid2grp_remove_by_uid(uid);
		PTHREAD_RWLOCK_unlock(&uid2grp_user_lock);
	}

	/* Allocation happens outside the lock: it may perform slow
	 * name-service lookups. */
	*gdata = uid2grp_allocate_by_uid(uid);
	PTHREAD_RWLOCK_wrlock(&uid2grp_user_lock);
	if (*gdata)
		uid2grp_add_user(*gdata);
	/* Re-lookup under the write lock: another thread may have added
	 * an entry for this uid while we were unlocked. */
	success = uid2grp_lookup_by_uid(uid, gdata);
	if (success)
		uid2grp_hold_group_data(*gdata);
	PTHREAD_RWLOCK_unlock(&uid2grp_user_lock);

	return success;
}
/**
 * @brief LRU cleanup: close a cached file descriptor to reclaim resources.
 *
 * For a regular file with an open cached descriptor, closes it and marks
 * the handle closed.
 *
 * Fix: the original took only a *read* lock on obj_hdl->lock while
 * mutating myself->u.file.fd and openflags — concurrent readers of those
 * fields (e.g. vfs_read/vfs_write asserting fd >= 0) could observe the
 * mutation mid-flight.  Closing the fd is a write to the protected state,
 * so take the write lock.
 *
 * @param[in] obj_hdl  Object whose cached fd may be closed
 * @param[in] requests LRU action flags (unused here)
 *
 * @return FSAL status; errno-derived error if close fails.
 */
fsal_status_t vfs_lru_cleanup(struct fsal_obj_handle *obj_hdl,
			      lru_actions_t requests)
{
	struct vfs_fsal_obj_handle *myself;
	fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR;
	int retval = 0;

	myself = container_of(obj_hdl, struct vfs_fsal_obj_handle, obj_handle);

	/* Refuse cross-FSAL operations. */
	if (obj_hdl->fsal != obj_hdl->fs->fsal) {
		LogDebug(COMPONENT_FSAL,
			 "FSAL %s operation for handle belonging to FSAL %s, return EXDEV",
			 obj_hdl->fsal->name, obj_hdl->fs->fsal->name);
		retval = EXDEV;
		fsal_error = posix2fsal_error(retval);
		return fsalstat(fsal_error, retval);
	}

	/* Take WRITE lock: we modify the cached file descriptor state. */
	PTHREAD_RWLOCK_wrlock(&obj_hdl->lock);

	if (obj_hdl->type == REGULAR_FILE && myself->u.file.fd >= 0) {
		retval = close(myself->u.file.fd);
		myself->u.file.fd = -1;
		myself->u.file.openflags = FSAL_O_CLOSED;
	}

	if (retval == -1) {
		retval = errno;
		fsal_error = posix2fsal_error(retval);
	}

	PTHREAD_RWLOCK_unlock(&obj_hdl->lock);

	return fsalstat(fsal_error, retval);
}
/**
 * @brief Log the contents of a hash table (debugging aid).
 *
 * Dumps, for each partition of @p ht, every entry's key/value string
 * representation together with the recomputed partition index and
 * red-black-tree hash so cache placement can be verified by eye.
 *
 * Fixes:
 *  - format string "position %"PRIu32"contains:" lacked a space, gluing
 *    the partition number to the word "contains";
 *  - nb_entries is a size_t, so %zu is the matching specifier (%zd is
 *    for ssize_t — mismatched specifiers are undefined behavior);
 *  - the partition's node count was read before taking the partition
 *    lock; the lock is now acquired first;
 *  - the function's closing brace was missing.
 *
 * @param[in] component Log component to emit under
 * @param[in] ht        Hash table to dump
 */
void hashtable_log(log_components_t component, struct hash_table *ht)
{
	/* The current position in the hash table */
	struct rbt_node *it = NULL;
	/* The root of the tree currently being inspected */
	struct rbt_head *root;
	/* Buffer descriptors for the key and value */
	struct hash_data *data = NULL;
	/* String representation of the key */
	char dispkey[HASHTABLE_DISPLAY_STRLEN];
	/* String representation of the stored value */
	char dispval[HASHTABLE_DISPLAY_STRLEN];
	/* Index for traversing the partitions */
	uint32_t i = 0;
	/* Running count of entries */
	size_t nb_entries = 0;
	/* Recomputed partition index */
	uint32_t index = 0;
	/* Recomputed hash for Red-Black tree */
	uint64_t rbt_hash = 0;

	LogFullDebug(component, "The hash is partitioned into %d trees",
		     ht->parameter.index_size);

	for (i = 0; i < ht->parameter.index_size; i++)
		nb_entries += ht->partitions[i].count;

	LogFullDebug(component, "The hash contains %zu entries", nb_entries);

	for (i = 0; i < ht->parameter.index_size; i++) {
		root = &ht->partitions[i].rbt;

		/* Hold the partition lock before touching its tree. */
		PTHREAD_RWLOCK_rdlock(&ht->partitions[i].lock);

		LogFullDebug(component,
			     "The partition in position %" PRIu32
			     " contains: %u entries", i, root->rbt_num_node);

		RBT_LOOP(root, it) {
			data = it->rbt_opaq;

			ht->parameter.key_to_str(&(data->key), dispkey);
			ht->parameter.val_to_str(&(data->val), dispval);

			/* Recompute placement so a misplaced entry (hash
			 * function bug) is visible in the log. */
			if (compute(ht, &data->key, &index, &rbt_hash)
			    != HASHTABLE_SUCCESS) {
				LogCrit(component,
					"Possible implementation error in hash_func_both");
				index = 0;
				rbt_hash = 0;
			}

			LogFullDebug(component,
				     "%s => %s; index=%" PRIu32 " rbt_hash=%"
				     PRIu64, dispkey, dispval, index, rbt_hash);

			RBT_INCREMENT(it);
		}

		PTHREAD_RWLOCK_unlock(&ht->partitions[i].lock);
	}
}
fsal_status_t vfs_write(struct fsal_obj_handle *obj_hdl, uint64_t offset, size_t buffer_size, void *buffer, size_t *write_amount, bool *fsal_stable) { struct vfs_fsal_obj_handle *myself; ssize_t nb_written; fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR; int retval = 0; myself = container_of(obj_hdl, struct vfs_fsal_obj_handle, obj_handle); if (obj_hdl->fsal != obj_hdl->fs->fsal) { LogDebug(COMPONENT_FSAL, "FSAL %s operation for handle belonging to FSAL %s, return EXDEV", obj_hdl->fsal->name, obj_hdl->fs->fsal->name); retval = EXDEV; fsal_error = posix2fsal_error(retval); return fsalstat(fsal_error, retval); } /* Take read lock on object to protect file descriptor. */ PTHREAD_RWLOCK_rdlock(&obj_hdl->lock); assert(myself->u.file.fd >= 0 && myself->u.file.openflags != FSAL_O_CLOSED); fsal_set_credentials(op_ctx->creds); nb_written = pwrite(myself->u.file.fd, buffer, buffer_size, offset); if (offset == -1 || nb_written == -1) { retval = errno; fsal_error = posix2fsal_error(retval); goto out; } *write_amount = nb_written; /* attempt stability */ if (fsal_stable != NULL && *fsal_stable) { retval = fsync(myself->u.file.fd); if (retval == -1) { retval = errno; fsal_error = posix2fsal_error(retval); } *fsal_stable = true; } out: PTHREAD_RWLOCK_unlock(&obj_hdl->lock); fsal_restore_ganesha_credentials(); return fsalstat(fsal_error, retval); }
struct gsh_client *get_gsh_client(sockaddr_t *client_ipaddr, bool lookup_only) { struct avltree_node *node = NULL; struct gsh_client *cl; struct server_stats *server_st; struct gsh_client v; uint8_t *addr = NULL; uint32_t ipaddr; int addr_len = 0; void **cache_slot; switch (client_ipaddr->ss_family) { case AF_INET: addr = (uint8_t *) &((struct sockaddr_in *)client_ipaddr)-> sin_addr; addr_len = 4; memcpy(&ipaddr, (uint8_t *) &((struct sockaddr_in *)client_ipaddr)-> sin_addr, sizeof(ipaddr)); break; case AF_INET6: addr = (uint8_t *) &((struct sockaddr_in6 *)client_ipaddr)-> sin6_addr; addr_len = 16; memcpy(&ipaddr, (uint8_t *) &((struct sockaddr_in6 *)client_ipaddr)-> sin6_addr, sizeof(ipaddr)); break; default: assert(0); } v.addr.addr = addr; v.addr.len = addr_len; PTHREAD_RWLOCK_rdlock(&client_by_ip.lock); /* check cache */ cache_slot = (void **) &(client_by_ip.cache[eip_cache_offsetof(&client_by_ip, ipaddr)]); node = (struct avltree_node *)atomic_fetch_voidptr(cache_slot); if (node) { if (client_ip_cmpf(&v.node_k, node) == 0) { /* got it in 1 */ LogDebug(COMPONENT_HASHTABLE_CACHE, "client_mgr cache hit slot %d\n", eip_cache_offsetof(&client_by_ip, ipaddr)); cl = avltree_container_of(node, struct gsh_client, node_k); goto out; } }
fsal_status_t vfs_read(struct fsal_obj_handle *obj_hdl, uint64_t offset, size_t buffer_size, void *buffer, size_t *read_amount, bool *end_of_file) { struct vfs_fsal_obj_handle *myself; ssize_t nb_read; fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR; int retval = 0; myself = container_of(obj_hdl, struct vfs_fsal_obj_handle, obj_handle); if (obj_hdl->fsal != obj_hdl->fs->fsal) { LogDebug(COMPONENT_FSAL, "FSAL %s operation for handle belonging to FSAL %s, return EXDEV", obj_hdl->fsal->name, obj_hdl->fs->fsal->name); retval = EXDEV; fsal_error = posix2fsal_error(retval); return fsalstat(fsal_error, retval); } /* Take read lock on object to protect file descriptor. */ PTHREAD_RWLOCK_rdlock(&obj_hdl->lock); assert(myself->u.file.fd >= 0 && myself->u.file.openflags != FSAL_O_CLOSED); nb_read = pread(myself->u.file.fd, buffer, buffer_size, offset); if (offset == -1 || nb_read == -1) { retval = errno; fsal_error = posix2fsal_error(retval); goto out; } *read_amount = nb_read; /* dual eof condition */ *end_of_file = ((nb_read == 0) /* most clients */ || /* ESXi */ (((offset + nb_read) >= myself->attributes.filesize))) ? true : false; out: PTHREAD_RWLOCK_unlock(&obj_hdl->lock); return fsalstat(fsal_error, retval); }
/**
 * @brief Read the target of a cached symbolic link.
 *
 * Fast path reads the link under the content read-lock.  If the cached
 * content is untrusted, the read lock is dropped and a write lock taken
 * so the FSAL readlink can refresh the cache; the trust flag is re-checked
 * after the lock upgrade because another thread may have refreshed the
 * content in the window between unlock and wrlock.
 *
 * @param[in]  entry        Entry to read; must be a SYMBOLIC_LINK
 * @param[out] link_content Receives the link target
 *
 * @return CACHE_INODE_SUCCESS, CACHE_INODE_BAD_TYPE for non-symlinks,
 *         or a converted FSAL error (entry is killed on ERR_FSAL_STALE).
 */
cache_inode_status_t cache_inode_readlink(cache_entry_t *entry,
					  struct gsh_buffdesc *link_content)
{
	cache_inode_status_t status = CACHE_INODE_SUCCESS;
	fsal_status_t fsal_status = { ERR_FSAL_NO_ERROR, 0 };
	bool refresh = false;

	if (entry->type != SYMBOLIC_LINK) {
		status = CACHE_INODE_BAD_TYPE;
		return status;
	}

	PTHREAD_RWLOCK_rdlock(&entry->content_lock);
	if (!(entry->flags & CACHE_INODE_TRUST_CONTENT)) {
		/* Our data are stale.  Drop the lock, get a
		   write-lock, load in new data, and copy it out to
		   the caller. */
		PTHREAD_RWLOCK_unlock(&entry->content_lock);
		PTHREAD_RWLOCK_wrlock(&entry->content_lock);
		/* Make sure nobody updated the content while we were
		   waiting. */
		refresh = !(entry->flags & CACHE_INODE_TRUST_CONTENT);
	}
	fsal_status =
	    entry->obj_handle->ops->readlink(entry->obj_handle, link_content,
					     refresh);
	/* Mark the content trusted again only after a successful refresh. */
	if (refresh && !(FSAL_IS_ERROR(fsal_status)))
		atomic_set_uint32_t_bits(&entry->flags,
					 CACHE_INODE_TRUST_CONTENT);

	PTHREAD_RWLOCK_unlock(&entry->content_lock);

	if (FSAL_IS_ERROR(fsal_status)) {
		status = cache_inode_error_convert(fsal_status);
		if (fsal_status.major == ERR_FSAL_STALE) {
			LogEvent(COMPONENT_CACHE_INODE,
				 "FSAL returned STALE from readlink");
			cache_inode_kill_entry(entry);
		}

		return status;
	}

	return status;
}
/**
 * @brief Renames an entry
 *
 * This function calls the FSAL to rename a file, then mirrors the
 * operation in the cache.
 *
 * The FSAL rename happens FIRST; only afterwards are the cached dirents
 * updated (under src_dest_lock).  Any dirent bookkeeping failure falls
 * back to invalidating the whole cached directory rather than failing
 * the operation.
 *
 * @param[in] dir_src  The source directory
 * @param[in] oldname  The current name of the file
 * @param[in] dir_dest The destination directory
 * @param[in] newname  The name to be assigned to the object
 *
 * @retval CACHE_INODE_SUCCESS if operation is a success.
 * @retval CACHE_INODE_NOT_FOUND if source object does not exist
 * @retval CACHE_INODE_ENTRY_EXISTS on collision.
 * @retval CACHE_INODE_NOT_A_DIRECTORY if dir_src or dir_dest is not a
 *                                     directory.
 * @retval CACHE_INODE_BADNAME if either name is "." or ".."
 */
cache_inode_status_t cache_inode_rename(cache_entry_t *dir_src,
					const char *oldname,
					cache_entry_t *dir_dest,
					const char *newname)
{
	fsal_status_t fsal_status = { 0, 0 };
	cache_entry_t *lookup_src = NULL;
	cache_entry_t *lookup_dst = NULL;
	cache_inode_status_t status = CACHE_INODE_SUCCESS;
	cache_inode_status_t status_ref_dir_src = CACHE_INODE_SUCCESS;
	cache_inode_status_t status_ref_dir_dst = CACHE_INODE_SUCCESS;
	cache_inode_status_t status_ref_dst = CACHE_INODE_SUCCESS;

	if ((dir_src->type != DIRECTORY) || (dir_dest->type != DIRECTORY)) {
		status = CACHE_INODE_NOT_A_DIRECTORY;
		goto out;
	}

	/* Check for . and .. on oldname and newname. */
	if (!strcmp(oldname, ".") || !strcmp(oldname, "..")
	    || !strcmp(newname, ".") || !strcmp(newname, "..")) {
		status = CACHE_INODE_BADNAME;
		goto out;
	}

	/* Check for object existence in source directory */
	status = cache_inode_lookup_impl(dir_src, oldname, &lookup_src);

	if (lookup_src == NULL) {
		/* If FSAL FH is stale, then this was managed in
		 * cache_inode_lookup */
		if (status != CACHE_INODE_FSAL_ESTALE)
			status = CACHE_INODE_NOT_FOUND;

		LogEvent(COMPONENT_CACHE_INODE,
			 "Rename (%p,%s)->(%p,%s) : source doesn't exist",
			 dir_src, oldname, dir_dest, newname);
		goto out;
	}

	/* Do not rename a junction node or an export root. */
	if (lookup_src->type == DIRECTORY) {
		/* Get attr_lock for looking at junction_export */
		PTHREAD_RWLOCK_rdlock(&lookup_src->attr_lock);

		if (lookup_src->object.dir.junction_export != NULL ||
		    atomic_fetch_int32_t(&lookup_src->exp_root_refcount)
		    != 0) {
			/* Trying to rename an export mount point */
			LogCrit(COMPONENT_CACHE_INODE,
				"Attempt to rename export %s", oldname);

			/* Release attr_lock */
			PTHREAD_RWLOCK_unlock(&lookup_src->attr_lock);

			status = CACHE_INODE_DIR_NOT_EMPTY;
			goto out;
		}

		/* Release attr_lock */
		PTHREAD_RWLOCK_unlock(&lookup_src->attr_lock);
	}

	/* Check if an object with the new name exists in the destination
	   directory */
	status = cache_inode_lookup_impl(dir_dest, newname, &lookup_dst);

	if (status == CACHE_INODE_SUCCESS) {
		LogDebug(COMPONENT_CACHE_INODE,
			 "Rename (%p,%s)->(%p,%s) : destination already exists",
			 dir_src, oldname, dir_dest, newname);
	}

	/* A missing destination is the normal case, not an error. */
	if (status == CACHE_INODE_NOT_FOUND)
		status = CACHE_INODE_SUCCESS;

	if (status == CACHE_INODE_FSAL_ESTALE) {
		LogDebug(COMPONENT_CACHE_INODE,
			 "Rename (%p,%s)->(%p,%s) : stale destination", dir_src,
			 oldname, dir_dest, newname);
	}

	if (lookup_src == lookup_dst) {
		/* Nothing to do according to POSIX and NFS3/4
		 * If from and to both refer to the same file (they might be
		 * hard links of each other), then RENAME should perform no
		 * action and return success */
		LogDebug(COMPONENT_CACHE_INODE,
			 "Rename (%p,%s)->(%p,%s) : same file so skipping out",
			 dir_src, oldname, dir_dest, newname);
		goto out;
	}

	/* Perform the rename operation in FSAL before doing anything in the
	 * cache. Indeed, if the FSAL_rename fails unexpectly, the cache would
	 * be inconsistent!
	 *
	 * We do almost no checking before making call because we want to return
	 * error based on the files actually present in the directories, not
	 * what we have in our cache. */
	LogFullDebug(COMPONENT_CACHE_INODE, "about to call FSAL rename");

	fsal_status = dir_src->obj_handle->ops->rename(dir_src->obj_handle,
						       oldname,
						       dir_dest->obj_handle,
						       newname);

	LogFullDebug(COMPONENT_CACHE_INODE, "returned from FSAL rename");

	/* Refresh directory attributes regardless of the rename outcome:
	 * the FSAL call may have partially changed directory state. */
	status_ref_dir_src = cache_inode_refresh_attrs_locked(dir_src);

	if (dir_src != dir_dest)
		status_ref_dir_dst =
		    cache_inode_refresh_attrs_locked(dir_dest);

	LogFullDebug(COMPONENT_CACHE_INODE, "done refreshing attributes");

	if (FSAL_IS_ERROR(fsal_status)) {
		status = cache_inode_error_convert(fsal_status);
		LogFullDebug(COMPONENT_CACHE_INODE,
			     "FSAL rename failed with %s",
			     cache_inode_err_str(status));
		goto out;
	}

	if (lookup_dst) {
		/* Force a refresh of the overwritten inode */
		status_ref_dst =
		    cache_inode_refresh_attrs_locked(lookup_dst);
		/* The overwritten inode may well be gone now. */
		if (status_ref_dst == CACHE_INODE_FSAL_ESTALE)
			status_ref_dst = CACHE_INODE_SUCCESS;
	}

	/* First refresh failure wins. */
	status = status_ref_dir_src;
	if (status == CACHE_INODE_SUCCESS)
		status = status_ref_dir_dst;
	if (status == CACHE_INODE_SUCCESS)
		status = status_ref_dst;

	if (status != CACHE_INODE_SUCCESS)
		goto out;

	/* Must take locks on directories now,
	 * because if another thread checks source and destination existence
	 * in the same time, it will try to do the same checks...
	 * and it will have the same conclusion !!!
	 */
	src_dest_lock(dir_src, dir_dest);

	if (lookup_dst) {
		/* Remove the entry from parent dir_entries avl */
		status_ref_dir_dst =
		    cache_inode_remove_cached_dirent(dir_dest, newname);
		if (status_ref_dir_dst != CACHE_INODE_SUCCESS) {
			LogDebug(COMPONENT_CACHE_INODE,
				 "remove entry failed with status %s",
				 cache_inode_err_str(status_ref_dir_dst));
			cache_inode_invalidate_all_cached_dirent(dir_dest);
		}
	}

	if (dir_src == dir_dest) {
		/* if the rename operation is made within the same dir, then we
		 * use an optimization: cache_inode_rename_dirent is used
		 * instead of adding/removing dirent. This limits the use of
		 * resource in this case */

		LogDebug(COMPONENT_CACHE_INODE,
			 "Rename (%p,%s)->(%p,%s) : source and target "
			 "directory the same", dir_src, oldname, dir_dest,
			 newname);

		cache_inode_status_t tmp_status =
		    cache_inode_rename_cached_dirent(dir_dest, oldname,
						     newname);
		if (tmp_status != CACHE_INODE_SUCCESS) {
			/* We're obviously out of date.  Throw out the cached
			   directory */
			cache_inode_invalidate_all_cached_dirent(dir_dest);
		}
	} else {
		cache_inode_status_t tmp_status = CACHE_INODE_SUCCESS;

		LogDebug(COMPONENT_CACHE_INODE,
			 "Rename (%p,%s)->(%p,%s) : moving entry", dir_src,
			 oldname, dir_dest, newname);

		/* We may have a cache entry for the destination
		 * filename.  If we do, we must delete it : it is stale. */
		tmp_status =
		    cache_inode_remove_cached_dirent(dir_dest, newname);

		if (tmp_status != CACHE_INODE_SUCCESS
		    && tmp_status != CACHE_INODE_NOT_FOUND) {
			LogDebug(COMPONENT_CACHE_INODE,
				 "Remove stale dirent returned %s",
				 cache_inode_err_str(tmp_status));
			cache_inode_invalidate_all_cached_dirent(dir_dest);
		}

		tmp_status =
		    cache_inode_add_cached_dirent(dir_dest, newname,
						  lookup_src, NULL);
		if (tmp_status != CACHE_INODE_SUCCESS) {
			/* We're obviously out of date.  Throw out the cached
			   directory */
			LogCrit(COMPONENT_CACHE_INODE,
				"Add dirent returned %s",
				cache_inode_err_str(tmp_status));
			cache_inode_invalidate_all_cached_dirent(dir_dest);
		}

		/* Remove the old entry */
		tmp_status =
		    cache_inode_remove_cached_dirent(dir_src, oldname);

		if (tmp_status != CACHE_INODE_SUCCESS
		    && tmp_status != CACHE_INODE_NOT_FOUND) {
			LogDebug(COMPONENT_CACHE_INODE,
				 "Remove old dirent returned %s",
				 cache_inode_err_str(tmp_status));
			cache_inode_invalidate_all_cached_dirent(dir_src);
		}
	}

	/* unlock entries */
	src_dest_unlock(dir_src, dir_dest);

 out:
	/* Drop the references taken by the two lookups. */
	if (lookup_src)
		cache_inode_put(lookup_src);

	if (lookup_dst)
		cache_inode_put(lookup_dst);

	return status;
}
/**
 * @brief Remove a named entry from a directory.
 *
 * Looks up the entry, refuses to remove junction nodes / export roots,
 * closes the file first if it is open (avoids 'silly rename'), performs
 * the FSAL unlink, then updates the cached dirents and attributes.
 *
 * NOTE(review): status_ref_entry is assigned from
 * cache_inode_remove_cached_dirent() and then immediately overwritten by
 * cache_inode_refresh_attrs_locked(entry) — the dirent-removal status is
 * only logged, never propagated.  Also, the FSAL_IS_ERROR(fsal_status)
 * re-check after the refresh can never be true here (an FSAL error was
 * already handled above); the in-code log message acknowledges this.
 *
 * @param[in] entry The directory to remove from (must be DIRECTORY)
 * @param[in] name  Name of the entry to remove
 *
 * @return CACHE_INODE_SUCCESS or a cache_inode error status.
 */
cache_inode_status_t cache_inode_remove(cache_entry_t *entry,
					const char *name)
{
	cache_entry_t *to_remove_entry = NULL;
	fsal_status_t fsal_status = { 0, 0 };
	cache_inode_status_t status = CACHE_INODE_SUCCESS;
	cache_inode_status_t status_ref_entry = CACHE_INODE_SUCCESS;

	if (entry->type != DIRECTORY) {
		status = CACHE_INODE_NOT_A_DIRECTORY;
		goto out;
	}

	/* Factor this somewhat.  In the case where the directory hasn't
	   been populated, the entry may not exist in the cache and we'd
	   be bringing it in just to dispose of it. */

	/* Looks up for the entry to remove */
	status = cache_inode_lookup_impl(entry, name, &to_remove_entry);

	if (to_remove_entry == NULL) {
		LogFullDebug(COMPONENT_CACHE_INODE, "lookup %s failure %s",
			     name, cache_inode_err_str(status));
		goto out;
	}

	/* Do not remove a junction node or an export root. */
	if (to_remove_entry->type == DIRECTORY) {
		/* Get attr_lock for looking at junction_export */
		PTHREAD_RWLOCK_rdlock(&to_remove_entry->attr_lock);

		if (to_remove_entry->object.dir.junction_export != NULL ||
		    atomic_fetch_int32_t(&to_remove_entry->exp_root_refcount)
		    != 0) {
			/* Trying to remove an export mount point */
			LogCrit(COMPONENT_CACHE_INODE,
				"Attempt to remove export %s", name);

			/* Release attr_lock */
			PTHREAD_RWLOCK_unlock(&to_remove_entry->attr_lock);

			status = CACHE_INODE_DIR_NOT_EMPTY;
			goto out;
		}

		/* Release attr_lock */
		PTHREAD_RWLOCK_unlock(&to_remove_entry->attr_lock);
	}

	LogDebug(COMPONENT_CACHE_INODE, "%s", name);

	if (is_open(to_remove_entry)) {
		/* entry is not locked and seems to be open for fd caching
		 * purpose.
		 * candidate for closing since unlink of an open file results
		 * in 'silly rename' on certain platforms
		 */
		status = cache_inode_close(to_remove_entry,
					   CACHE_INODE_FLAG_REALLYCLOSE);
		if (status != CACHE_INODE_SUCCESS) {
			/* non-fatal error. log the warning and move on */
			LogCrit(COMPONENT_CACHE_INODE,
				"Error closing %s before unlink: %s.", name,
				cache_inode_err_str(status));
		}
	}

	fsal_status = entry->obj_handle->obj_ops.unlink(entry->obj_handle,
							name);

	if (FSAL_IS_ERROR(fsal_status)) {
		/* Parent handle is stale: kill the parent entry. */
		if (fsal_status.major == ERR_FSAL_STALE)
			cache_inode_kill_entry(entry);

		status = cache_inode_error_convert(fsal_status);

		LogFullDebug(COMPONENT_CACHE_INODE, "unlink %s failure %s",
			     name, cache_inode_err_str(status));

		if (to_remove_entry->type == DIRECTORY
		    && status == CACHE_INODE_DIR_NOT_EMPTY) {
			/* its dirent tree is probably stale, flush it
			 * to try and make things right again */
			PTHREAD_RWLOCK_wrlock(&to_remove_entry->content_lock);
			(void) cache_inode_invalidate_all_cached_dirent
			    (to_remove_entry);
			PTHREAD_RWLOCK_unlock(&to_remove_entry->content_lock);
		}
		goto out;
	}

	/* Remove the entry from parent dir_entries avl */
	PTHREAD_RWLOCK_wrlock(&entry->content_lock);
	status_ref_entry = cache_inode_remove_cached_dirent(entry, name);
	LogDebug(COMPONENT_CACHE_INODE,
		 "cache_inode_remove_cached_dirent %s status %s", name,
		 cache_inode_err_str(status_ref_entry));
	PTHREAD_RWLOCK_unlock(&entry->content_lock);

	/* NOTE(review): this overwrites the dirent-removal status above. */
	status_ref_entry = cache_inode_refresh_attrs_locked(entry);
	if (FSAL_IS_ERROR(fsal_status)) {
		status = cache_inode_error_convert(fsal_status);
		LogFullDebug(COMPONENT_CACHE_INODE,
			     "not sure this code makes sense %s failure %s",
			     name, cache_inode_err_str(status));
		goto out;
	}

	/* Update the attributes for the removed entry */
	(void)cache_inode_refresh_attrs_locked(to_remove_entry);

	status = status_ref_entry;
	if (status != CACHE_INODE_SUCCESS) {
		LogDebug(COMPONENT_CACHE_INODE,
			 "cache_inode_refresh_attrs_locked(entry %p %s) "
			 "returned %s", entry, name,
			 cache_inode_err_str(status_ref_entry));
	}

 out:
	LogFullDebug(COMPONENT_CACHE_INODE, "remove %s: status=%s", name,
		     cache_inode_err_str(status));

	/* This is for the reference taken by lookup */
	if (to_remove_entry)
		cache_inode_put(to_remove_entry);

	return status;
}
/**
 * @brief Check and look up the supplied stateid
 *
 * This function yields the state for the stateid if it is valid.  It
 * handles the special all-zeros / all-ones / current stateids, verifies
 * the stateid's epoch against this server instance, reserves the owning
 * client's lease, checks the stateid belongs to the expected file, and
 * enforces seqid ordering (OLD/BAD/replay detection per RFC 5661).
 *
 * @param[in]  stateid     Stateid to look up
 * @param[in]  entry       Associated file (if any)
 * @param[out] state       Found state
 * @param[in]  data        Compound data
 * @param[in]  flags       Flags governing special stateids
 * @param[in]  owner_seqid seqid on v4.0 owner
 * @param[in]  check_seqid Whether to validate owner_seqid
 * @param[in]  tag         Arbitrary string for logging/debugging
 *
 * @return NFSv4 status codes
 */
nfsstat4 nfs4_Check_Stateid(stateid4 *stateid, cache_entry_t *entry,
			    state_t **state, compound_data_t *data, int flags,
			    seqid4 owner_seqid, bool check_seqid,
			    const char *tag)
{
	uint32_t epoch = 0;
	uint64_t epoch_low = ServerEpoch & 0xFFFFFFFF;
	state_t *state2 = NULL;

	/* string str has to accomodate stateid->other(OTHERSIZE * 2 ),
	 * stateid->seqid(max 10 bytes),
	 * a colon (:) and a terminating null character. */
	char str[OTHERSIZE * 2 + 10 + 2];
	int32_t diff;
	clientid4 clientid;
	nfs_client_id_t *pclientid;
	int rc;
	nfsstat4 status;

	/* Render the stateid for logging only when debugging is on. */
	if (isDebug(COMPONENT_STATE)) {
		sprint_mem(str, (char *)stateid->other, OTHERSIZE);
		sprintf(str + OTHERSIZE * 2, ":%u",
			(unsigned int)stateid->seqid);
	}

	LogFullDebug(COMPONENT_STATE,
		     "Check %s stateid flags%s%s%s%s%s%s%s", tag,
		     flags & STATEID_SPECIAL_ALL_0 ? " ALL_0" : "",
		     flags & STATEID_SPECIAL_ALL_1 ? " ALL_1" : "",
		     flags & STATEID_SPECIAL_CURRENT ? " CURRENT" : "",
		     flags & STATEID_SPECIAL_CLOSE_40 ? " CLOSE_40" : "",
		     flags & STATEID_SPECIAL_CLOSE_41 ? " CLOSE_41" : "",
		     flags & STATEID_SPECIAL_FREE ? " FREE" : "",
		     flags == 0 ? " NONE" : "");

	/* Test for OTHER is all zeros */
	if (memcmp(stateid->other, all_zero, OTHERSIZE) == 0) {
		if (stateid->seqid == 0 &&
		    (flags & STATEID_SPECIAL_ALL_0) != 0) {
			/* All 0 stateid */
			LogDebug(COMPONENT_STATE,
				 "Check %s stateid found special all 0 stateid",
				 tag);

			/** @todo FSF: eventually this may want to return an
			 * actual state for use in temporary locks for I/O.
			 */
			data->current_stateid_valid = false;
			goto success;
		}
		if (stateid->seqid == 1
		    && (flags & STATEID_SPECIAL_CURRENT) != 0) {
			/* Special current stateid */
			LogDebug(COMPONENT_STATE,
				 "Check %s stateid found special 'current' stateid",
				 tag);
			if (!data->current_stateid_valid) {
				LogDebug(COMPONENT_STATE,
					 "Check %s stateid STATEID_SPECIAL_CURRENT - current stateid is bad",
					 tag);
				status = NFS4ERR_BAD_STATEID;
				goto failure;
			}

			/* Copy current stateid in and proceed to checks */
			*stateid = data->current_stateid;
			goto check_it;
		}

		LogDebug(COMPONENT_STATE,
			 "Check %s stateid with OTHER all zeros, seqid %u unexpected",
			 tag, (unsigned int)stateid->seqid);
		status = NFS4ERR_BAD_STATEID;
		goto failure;
	}

	/* Test for OTHER is all ones */
	if (memcmp(stateid->other, all_ones, OTHERSIZE) == 0) {
		/* Test for special all ones stateid */
		if (stateid->seqid == seqid_all_one &&
		    (flags & STATEID_SPECIAL_ALL_1) != 0) {
			/* All 1 stateid */
			LogDebug(COMPONENT_STATE,
				 "Check %s stateid found special all 1 stateid",
				 tag);

			/** @todo FSF: eventually this may want to return an
			 * actual state for use in temporary locks for I/O.
			 */
			data->current_stateid_valid = false;
			goto success;
		}

		LogDebug(COMPONENT_STATE,
			 "Check %s stateid with OTHER all ones, seqid %u unexpected",
			 tag, (unsigned int)stateid->seqid);
		status = NFS4ERR_BAD_STATEID;
		goto failure;
	}

 check_it:

	/* Extract the clientid from the stateid other field */
	memcpy(&clientid, stateid->other, sizeof(clientid));

	/* Extract the epoch from the clientid */
	epoch = clientid >> (clientid4) 32;

	/* Check if stateid was made from this server instance */
	if (epoch != epoch_low) {
		LogDebug(COMPONENT_STATE,
			 "Check %s stateid found stale stateid %s", tag, str);
		status = NFS4ERR_STALE_STATEID;
		goto failure;
	}

	/* Try to get the related state */
	if (!nfs4_State_Get_Pointer(stateid->other, &state2)) {
		/* We matched this server's epoch, but could not find the
		 * stateid. Chances are, the client was expired and the state
		 * has all been freed.
		 *
		 * We could use another check here for a BAD stateid
		 */
		LogDebug(COMPONENT_STATE,
			 "Check %s stateid could not find state %s", tag, str);

		/* Try and find the clientid */
		rc = nfs_client_id_get_confirmed(clientid, &pclientid);

		if (rc != CLIENT_ID_SUCCESS) {
			/* Unknown client id (or other problem),
			 * return that result.
			 */
			status = clientid_error_to_nfsstat(rc);
			goto failure;
		}

		if ((flags & (STATEID_SPECIAL_CLOSE_40 |
			      STATEID_SPECIAL_CLOSE_41)) != 0) {
			/* This is a close with a valid clientid, but invalid
			 * stateid counter. We will assume this is a replayed
			 * close.
			 */
			if (data->preserved_clientid != NULL) {
				/* We don't expect this, but, just in case...
				 * Update and release already reserved lease.
				 */
				pthread_mutex_lock(&data->preserved_clientid
						   ->cid_mutex);

				update_lease(data->preserved_clientid);

				pthread_mutex_unlock(&data->preserved_clientid
						     ->cid_mutex);

				data->preserved_clientid = NULL;
			}

			/* Check if lease is expired and reserve it */
			pthread_mutex_lock(&pclientid->cid_mutex);

			if (!reserve_lease(pclientid)) {
				LogDebug(COMPONENT_STATE,
					 "Returning NFS4ERR_EXPIRED");
				pthread_mutex_unlock(&pclientid->cid_mutex);
				status = NFS4ERR_EXPIRED;
				goto failure;
			}

			if ((flags & STATEID_SPECIAL_CLOSE_40) != 0) {
				/* Just update the lease and leave the reserved
				 * clientid NULL.
				 */
				update_lease(pclientid);
			} else {
				/* Remember the reserved clientid for the rest
				 * of the compound.
				 */
				data->preserved_clientid = pclientid;
			}

			pthread_mutex_unlock(&pclientid->cid_mutex);

			/* Replayed close, it's ok, but stateid doesn't exist */
			LogDebug(COMPONENT_STATE,
				 "Check %s stateid is a replayed close", tag);
			data->current_stateid_valid = false;
			goto success;
		}

		/* Release the clientid reference we just acquired. */
		dec_client_id_ref(pclientid);

		status = NFS4ERR_BAD_STATEID;
		goto failure;
	}

	/* Now, if this lease is not already reserved, reserve it */
	if (data->preserved_clientid !=
	    state2->state_owner->so_owner.so_nfs4_owner.so_clientrec) {
		if (data->preserved_clientid != NULL) {
			/* We don't expect this to happen, but, just in case...
			 * Update and release already reserved lease.
			 */
			pthread_mutex_lock(&data->preserved_clientid
					   ->cid_mutex);

			update_lease(data->preserved_clientid);

			pthread_mutex_unlock(&data->preserved_clientid
					     ->cid_mutex);

			data->preserved_clientid = NULL;
		}

		/* Check if lease is expired and reserve it */
		pthread_mutex_lock(&state2->state_owner->so_owner
				   .so_nfs4_owner.so_clientrec->cid_mutex);

		if (!reserve_lease
		    (state2->state_owner->so_owner.so_nfs4_owner.
		     so_clientrec)) {
			LogDebug(COMPONENT_STATE, "Returning NFS4ERR_EXPIRED");
			pthread_mutex_unlock(&state2->state_owner->so_owner
					     .so_nfs4_owner.so_clientrec
					     ->cid_mutex);
			status = NFS4ERR_EXPIRED;
			goto failure;
		}

		data->preserved_clientid =
		    state2->state_owner->so_owner.so_nfs4_owner.so_clientrec;

		pthread_mutex_unlock(&state2->state_owner->so_owner
				     .so_nfs4_owner.so_clientrec->cid_mutex);
	}

	/* Sanity check : Is this the right file ? */
	if ((entry != NULL) && (state2->state_entry != entry)) {
		LogDebug(COMPONENT_STATE,
			 "Check %s stateid found stateid %s has wrong file",
			 tag, str);
		status = NFS4ERR_BAD_STATEID;
		goto failure;
	}

	/* Whether stateid.seqid may be zero depends on the state type
	   exclusively, See RFC 5661 pp. 161,287-288. */
	if ((state2->state_type == STATE_TYPE_LAYOUT) ||
	    (stateid->seqid != 0)) {
		/* Check seqid in stateid */

		/**
		 * @todo fsf: maybe change to simple comparison:
		 * stateid->seqid < state2->state_seqid
		 * as good enough and maybe makes pynfs happy.
		 */
		diff = stateid->seqid - state2->state_seqid;
		if (diff < 0) {
			/* if this is NFSv4.0 and stateid's seqid is one less
			 * than current AND if owner_seqid is current
			 * pass state back to allow replay check
			 */
			if ((check_seqid)
			    && ((diff == -1)
				|| ((state2->state_seqid == 1)
				    && (stateid->seqid == seqid_all_one)))
			    && (owner_seqid ==
				state2->state_owner->so_owner.so_nfs4_owner.
				so_seqid)) {
				LogDebug(COMPONENT_STATE, "possible replay?");
				*state = state2;
				status = NFS4ERR_REPLAY;
				goto replay;
			}
			/* OLD_STATEID */
			LogDebug(COMPONENT_STATE,
				 "Check %s stateid found OLD stateid %s, expected seqid %u",
				 tag, str,
				 (unsigned int)state2->state_seqid);
			status = NFS4ERR_OLD_STATEID;
			goto failure;
		}

		/* stateid seqid is current and owner seqid is previous,
		 * replay (should be an error condition that did not change
		 * the stateid, no real need to check since the operation
		 * must be the same)
		 */
		else if ((diff == 0) && (check_seqid)
			 && (owner_seqid ==
			     state2->state_owner->so_owner.so_nfs4_owner.
			     so_seqid)) {
			LogDebug(COMPONENT_STATE, "possible replay?");
			*state = state2;
			status = NFS4ERR_REPLAY;
			goto replay;
		} else if (diff > 0) {
			/* BAD_STATEID */
			LogDebug(COMPONENT_STATE,
				 "Check %s stateid found BAD stateid %s, expected seqid %u",
				 tag, str,
				 (unsigned int)state2->state_seqid);
			status = NFS4ERR_BAD_STATEID;
			goto failure;
		}
	}

	if ((flags & STATEID_SPECIAL_FREE) != 0) {
		switch (state2->state_type) {
		case STATE_TYPE_LOCK:
			PTHREAD_RWLOCK_rdlock(&state2->state_entry->
					      state_lock);
			if (glist_empty
			    (&state2->state_data.lock.state_locklist)) {
				LogFullDebug(COMPONENT_STATE,
					     "Check %s stateid %s has no locks, ok to free",
					     tag, str);
				PTHREAD_RWLOCK_unlock(&state2->state_entry->
						      state_lock);
				break;
			}
			PTHREAD_RWLOCK_unlock(&state2->state_entry->
					      state_lock);
			/* Fall through for failure */

		case STATE_TYPE_NONE:
		case STATE_TYPE_SHARE:
		case STATE_TYPE_DELEG:
		case STATE_TYPE_LAYOUT:
			LogDebug(COMPONENT_STATE,
				 "Check %s stateid found stateid %s with locks held",
				 tag, str);

			status = NFS4ERR_LOCKS_HELD;
			goto failure;
		}
	}

	data->current_stateid_valid = true;

	LogFullDebug(COMPONENT_STATE,
		     "Check %s stateid found valid stateid %s - %p", tag, str,
		     state2);

	/* Copy stateid into current for later use */
	data->current_stateid = *stateid;
	data->current_stateid.seqid = state2->state_seqid;

 success:

	*state = state2;
	return NFS4_OK;

 failure:

	*state = NULL;

 replay:

	data->current_stateid_valid = false;
	return status;
}
/**
 * @brief Remove a named entry from a directory (req_ctx variant).
 *
 * Checks caller permissions (write/delete-child), looks up the entry,
 * applies the sticky-bit check, closes the file first if open (avoids
 * 'silly rename'), performs the FSAL unlink, and finally updates cached
 * dirents and attributes for both the parent and the removed entry.
 *
 * NOTE(review): after a successful unlink, the FSAL_IS_ERROR(fsal_status)
 * re-check following cache_inode_refresh_attrs_locked(entry) can never be
 * true (an FSAL error already jumped to out above); it is effectively dead
 * code kept as-is.
 *
 * @param[in] entry   The directory to remove from (must be DIRECTORY)
 * @param[in] name    Name of the entry to remove
 * @param[in] req_ctx Request operation context (credentials, export)
 *
 * @return CACHE_INODE_SUCCESS or a cache_inode error status.
 */
cache_inode_status_t cache_inode_remove(cache_entry_t *entry,
					const char *name,
					struct req_op_context *req_ctx)
{
	cache_entry_t *to_remove_entry = NULL;
	fsal_status_t fsal_status = {0, 0};
	cache_inode_status_t status = CACHE_INODE_SUCCESS;
	cache_inode_status_t status_ref_entry = CACHE_INODE_SUCCESS;
	cache_inode_status_t status_ref_to_remove_entry = CACHE_INODE_SUCCESS;
	fsal_accessflags_t access_mask = 0;

	if(entry->type != DIRECTORY) {
		status = CACHE_INODE_NOT_A_DIRECTORY;
		goto out;
	}

	/* Check if caller is allowed to perform the operation */
	access_mask = (FSAL_MODE_MASK_SET(FSAL_W_OK) |
		       FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_DELETE_CHILD));

	status = cache_inode_access(entry, access_mask, req_ctx);
	if (status != CACHE_INODE_SUCCESS) {
		goto out;
	}

	/* Factor this somewhat.  In the case where the directory hasn't
	   been populated, the entry may not exist in the cache and we'd
	   be bringing it in just to dispose of it. */

	/* Looks up for the entry to remove */
	PTHREAD_RWLOCK_rdlock(&entry->content_lock);
	status = cache_inode_lookup_impl(entry, name, req_ctx,
					 &to_remove_entry);
	PTHREAD_RWLOCK_unlock(&entry->content_lock);

	if (to_remove_entry == NULL) {
		goto out;
	}

	/* Sticky-bit semantics: only the owner (or privileged caller)
	 * may remove in a sticky directory. */
	status = cache_inode_check_sticky(entry, to_remove_entry, req_ctx);
	if (status != CACHE_INODE_SUCCESS) {
		goto out;
	}

	LogDebug(COMPONENT_CACHE_INODE, "---> Cache_inode_remove : %s", name);

	if (is_open(to_remove_entry)) {
		/* entry is not locked and seems to be open for fd caching
		 * purpose.
		 * candidate for closing since unlink of an open file results
		 * in 'silly rename' on certain platforms */
		status = cache_inode_close(to_remove_entry,
					   CACHE_INODE_FLAG_REALLYCLOSE);
		if (status != CACHE_INODE_SUCCESS) {
			/* non-fatal error. log the warning and move on */
			LogCrit(COMPONENT_CACHE_INODE,
				"Error closing file before unlink: %d.",
				status);
		}
	}

	fsal_status = entry->obj_handle->ops->unlink(entry->obj_handle,
						     req_ctx, name);
	if (FSAL_IS_ERROR(fsal_status)) {
		status = cache_inode_error_convert(fsal_status);
		if(to_remove_entry->type == DIRECTORY &&
		   status == CACHE_INODE_DIR_NOT_EMPTY) {
			/* its dirent tree is probably stale, flush it
			 * to try and make things right again */
			PTHREAD_RWLOCK_wrlock(&to_remove_entry->content_lock);
			(void)cache_inode_invalidate_all_cached_dirent
			    (to_remove_entry);
			PTHREAD_RWLOCK_unlock(&to_remove_entry->content_lock);
		}
		goto out;
	}

	/* Remove the entry from parent dir_entries avl */
	PTHREAD_RWLOCK_wrlock(&entry->content_lock);
	cache_inode_remove_cached_dirent(entry, name, req_ctx);
	PTHREAD_RWLOCK_unlock(&entry->content_lock);

	status_ref_entry = cache_inode_refresh_attrs_locked(entry, req_ctx);
	/* NOTE(review): dead check — fsal_status cannot be an error here. */
	if(FSAL_IS_ERROR(fsal_status)) {
		status = cache_inode_error_convert(fsal_status);
		goto out;
	}

	/* Update the attributes for the removed entry */
	status_ref_to_remove_entry =
	    cache_inode_refresh_attrs_locked(to_remove_entry, req_ctx);
	/* The removed entry may be gone (last link): ESTALE is expected. */
	if (status_ref_to_remove_entry == CACHE_INODE_FSAL_ESTALE) {
		status_ref_to_remove_entry = CACHE_INODE_SUCCESS;
	}

	if (((status = status_ref_entry) != CACHE_INODE_SUCCESS) ||
	    ((status = status_ref_to_remove_entry) != CACHE_INODE_SUCCESS)) {
		goto out;
	}

out:
	LogFullDebug(COMPONENT_CACHE_INODE,
		     "cache_inode_remove_cached_dirent: status=%d", status);

	/* This is for the reference taken by lookup */
	if (to_remove_entry) {
		cache_inode_put(to_remove_entry);
	}

	return status;
}
/**
 * @brief NFS4_OP_LOOKUP
 *
 * Looks up a single component name in the current filehandle's
 * directory.  If the result is a directory carrying a junction, the
 * compound's export context is switched to the export across the
 * junction and the lookup result is replaced by that export's root.
 *
 * @param[in]     op    Arguments for nfs4_op
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  Results for nfs4_op
 *
 * @return NFS4_OK or an NFSv4 status code.
 */
int nfs4_op_lookup(struct nfs_argop4 *op, compound_data_t *data,
		   struct nfs_resop4 *resp)
{
	/* Convenient alias for the arguments */
	LOOKUP4args * const arg_LOOKUP4 = &op->nfs_argop4_u.oplookup;
	/* Convenient alias for the response */
	LOOKUP4res * const res_LOOKUP4 = &resp->nfs_resop4_u.oplookup;
	/* The name to look up */
	char *name = NULL;
	/* The directory in which to look up the name */
	struct fsal_obj_handle *dir_obj = NULL;
	/* The name found */
	struct fsal_obj_handle *file_obj = NULL;
	/* Status code from fsal */
	fsal_status_t status = {0, 0};

	resp->resop = NFS4_OP_LOOKUP;
	res_LOOKUP4->status = NFS4_OK;

	/* Do basic checks on a filehandle */
	res_LOOKUP4->status = nfs4_sanity_check_FH(data, DIRECTORY, false);

	if (res_LOOKUP4->status != NFS4_OK) {
		/* for some reason lookup is picky. Just not being
		 * dir is not enough. We want to know it is a symlink
		 */
		if (res_LOOKUP4->status == NFS4ERR_NOTDIR
		    && data->current_filetype == SYMBOLIC_LINK)
			res_LOOKUP4->status = NFS4ERR_SYMLINK;
		goto out;
	}

	/* Validate and convert the UFT8 objname to a regular string */
	res_LOOKUP4->status = nfs4_utf8string2dynamic(&arg_LOOKUP4->objname,
						      UTF8_SCAN_ALL, &name);

	if (res_LOOKUP4->status != NFS4_OK)
		goto out;

	LogDebug(COMPONENT_NFS_V4, "name=%s", name);

	/* Do the lookup in the FSAL */
	file_obj = NULL;
	dir_obj = data->current_obj;

	/* Sanity check: dir_obj should be ACTUALLY a directory */

	status = fsal_lookup(dir_obj, name, &file_obj, NULL);

	if (FSAL_IS_ERROR(status)) {
		res_LOOKUP4->status = nfs4_Errno_status(status);
		goto out;
	}

	if (file_obj->type == DIRECTORY) {
		/* state_lock guards dir.junction_export; held until the
		 * new export is stashed in op_ctx (or on the non-junction
		 * path, released immediately). */
		PTHREAD_RWLOCK_rdlock(&file_obj->state_hdl->state_lock);

		if (file_obj->state_hdl->dir.junction_export != NULL) {
			/* Handle junction */
			struct fsal_obj_handle *obj = NULL;

			/* Attempt to get a reference to the export across the
			 * junction.
			 */
			if (!export_ready(
				file_obj->state_hdl->dir.junction_export)) {
				/* If we could not get a reference, return
				 * stale.  Release state_lock */
				LogDebug(COMPONENT_EXPORT,
					 "NFS4ERR_STALE on LOOKUP of %s",
					 name);
				res_LOOKUP4->status = NFS4ERR_STALE;
				PTHREAD_RWLOCK_unlock(
					&file_obj->state_hdl->state_lock);
				goto out;
			}

			get_gsh_export_ref(
				file_obj->state_hdl->dir.junction_export);

			/* Release any old export reference */
			if (op_ctx->ctx_export != NULL)
				put_gsh_export(op_ctx->ctx_export);

			/* Stash the new export in the compound data. */
			op_ctx->ctx_export =
				file_obj->state_hdl->dir.junction_export;
			op_ctx->fsal_export = op_ctx->ctx_export->fsal_export;

			PTHREAD_RWLOCK_unlock(
				&file_obj->state_hdl->state_lock);

			/* Build credentials */
			res_LOOKUP4->status =
				nfs4_export_check_access(data->req);

			/* Test for access error (export should not be
			 * visible). */
			if (res_LOOKUP4->status == NFS4ERR_ACCESS) {
				/* If return is NFS4ERR_ACCESS then this client
				 * doesn't have access to this export, return
				 * NFS4ERR_NOENT to hide it. It was not visible
				 * in READDIR response. */
				LogDebug(COMPONENT_EXPORT,
					 "NFS4ERR_ACCESS Hiding Export_Id %d Pseudo %s with NFS4ERR_NOENT",
					 op_ctx->ctx_export->export_id,
					 op_ctx->ctx_export->pseudopath);
				res_LOOKUP4->status = NFS4ERR_NOENT;
				goto out;
			}

			if (res_LOOKUP4->status == NFS4ERR_WRONGSEC) {
				/* LogInfo already documents why */
				goto out;
			}

			if (res_LOOKUP4->status != NFS4_OK) {
				/* Should never get here,
				 * nfs4_export_check_access can only return
				 * NFS4_OK, NFS4ERR_ACCESS or NFS4ERR_WRONGSEC.
				 */
				LogMajor(COMPONENT_EXPORT,
					 "PSEUDO FS JUNCTION TRAVERSAL: Failed with %s for %s, id=%d",
					 nfsstat4_to_str(res_LOOKUP4->status),
					 op_ctx->ctx_export->pseudopath,
					 op_ctx->ctx_export->export_id);
				goto out;
			}

			status = nfs_export_get_root_entry(op_ctx->ctx_export,
							   &obj);

			if (FSAL_IS_ERROR(status)) {
				LogMajor(COMPONENT_EXPORT,
					 "PSEUDO FS JUNCTION TRAVERSAL: Failed to get root for %s, id=%d, status = %s",
					 op_ctx->ctx_export->pseudopath,
					 op_ctx->ctx_export->export_id,
					 msg_fsal_err(status.major));
				res_LOOKUP4->status = nfs4_Errno_status(status);
				goto out;
			}

			LogDebug(COMPONENT_EXPORT,
				 "PSEUDO FS JUNCTION TRAVERSAL: Crossed to %s, id=%d for name=%s",
				 op_ctx->ctx_export->pseudopath,
				 op_ctx->ctx_export->export_id, name);

			/* Swap the junction directory for the root of the
			 * export across the junction. */
			file_obj->obj_ops->put_ref(file_obj);
			file_obj = obj;
		} else {
			PTHREAD_RWLOCK_unlock(
				&file_obj->state_hdl->state_lock);
		}
	}

	/* Convert it to a file handle */
	if (!nfs4_FSALToFhandle(false, &data->currentFH, file_obj,
				op_ctx->ctx_export)) {
		res_LOOKUP4->status = NFS4ERR_SERVERFAULT;
		goto out;
	}

	/* Keep the pointer within the compound data */
	set_current_entry(data, file_obj);

	/* Put our ref */
	file_obj->obj_ops->put_ref(file_obj);
	file_obj = NULL;

	/* Return successfully */
	res_LOOKUP4->status = NFS4_OK;

out:
	/* Release reference on file_obj if we didn't utilze it. */
	if (file_obj)
		file_obj->obj_ops->put_ref(file_obj);

	/* Allocated by nfs4_utf8string2dynamic; free(NULL)-safe. */
	gsh_free(name);

	return res_LOOKUP4->status;
}				/* nfs4_op_lookup */
/**
 * @brief Copy file content.
 *
 * Both files must already be open.  The two content locks are taken in
 * address order to avoid deadlock, the copy is delegated to the FSAL,
 * and the destination's attributes are refreshed afterwards.
 *
 * @param[in]  src_entry  File to copy from
 * @param[in]  src_offset Offset start from the source file
 * @param[in]  dst_entry  Destination file to copy to
 * @param[in]  dst_offset Offset in the dest file
 * @param[in]  count      Requested bytes to copy; UINT64_MAX means
 *                        "from src_offset to the end of the source"
 * @param[out] copied     Bytes successfully copied (0 on failure)
 *
 * @return CACHE_INODE_SUCCESS or various errors
 */
cache_inode_status_t cache_inode_copy(cache_entry_t *src_entry,
				      uint64_t src_offset,
				      cache_entry_t *dst_entry,
				      uint64_t dst_offset, uint64_t count,
				      uint64_t *copied)
{
	fsal_status_t fsal_status = { 0, 0 };
	cache_inode_status_t status;

	/**
	 * To avoid deadlock, we always lock the entry with a smaller address
	 * before the locking the other entry. Note that "content_lock"
	 * protects "cache content" instead of file content. So only reader
	 * lock is needed for either file.
	 */
	if ((size_t)src_entry < (size_t)dst_entry) {
		PTHREAD_RWLOCK_rdlock(&src_entry->content_lock);
		PTHREAD_RWLOCK_rdlock(&dst_entry->content_lock);
	} else {
		PTHREAD_RWLOCK_rdlock(&dst_entry->content_lock);
		PTHREAD_RWLOCK_rdlock(&src_entry->content_lock);
	}

	if (!is_open(src_entry) || !is_open(dst_entry)) {
		LogEvent(COMPONENT_CACHE_INODE,
			 "Cannot copy between files that are not open");
		/* FIXME(review): NFS4ERR_OPENMODE is an NFSv4 status value,
		 * not a cache_inode_status_t; callers comparing against
		 * cache_inode error codes may misinterpret it.  Left as-is
		 * pending confirmation of the intended error code. */
		status = NFS4ERR_OPENMODE;
		goto out;
	}

	if (count == UINT64_MAX) {
		/* Copy from src_offset through the end of the source. */
		count = src_entry->obj_handle->attrs->filesize - src_offset;
		/* Fixed: count is uint64_t, so the %zu (size_t) conversion
		 * was undefined behavior on platforms where the two types
		 * differ; use PRIu64. */
		LogDebug(COMPONENT_CACHE_INODE,
			 "0-count has an effective value of %" PRIu64,
			 count);
	}

	fsal_status = src_entry->obj_handle->obj_ops.copy(
						src_entry->obj_handle,
						src_offset,
						dst_entry->obj_handle,
						dst_offset, count, copied);
	if (FSAL_IS_ERROR(fsal_status)) {
		*copied = 0;
		status = cache_inode_error_convert(fsal_status);
		LogEvent(COMPONENT_CACHE_INODE,
			 "File copy failed: major = %d, minor = %d",
			 fsal_status.major, fsal_status.minor);
		goto out;
	}

	/* Update dest file after copying to it. */
	PTHREAD_RWLOCK_wrlock(&dst_entry->attr_lock);
	status = cache_inode_refresh_attrs(dst_entry);
	PTHREAD_RWLOCK_unlock(&dst_entry->attr_lock);

out:
	/* Release both content locks. */
	if ((size_t)src_entry < (size_t)dst_entry) {
		PTHREAD_RWLOCK_unlock(&dst_entry->content_lock);
		PTHREAD_RWLOCK_unlock(&src_entry->content_lock);
	} else {
		PTHREAD_RWLOCK_unlock(&src_entry->content_lock);
		PTHREAD_RWLOCK_unlock(&dst_entry->content_lock);
	}

	return status;
}
/** * @brief Delete the unecessary directories from pseudo FS * * @param pseudopath [IN] full path of the node * @param entry [IN] cache entry for the last directory in the path * * If this entry is present is pseudo FSAL, and is unnecessary, then remove it. * Check recursively if the parent entry is needed. * * The pseudopath is deconstructed in place to create the subsequently shorter * pseudo paths. * * When called the first time, entry is the mount point of an export that has * been unmounted from the PseudoFS. By definition, it is NOT the root of a * PseudoFS. Also, the PseudoFS root filesystem is NOT mounted and thus this * function will not be called for it. The req_op_context references the * export for the PseudoFS entry is within. Note that the caller is * responsible for checking if it is an FSAL_PSEUDO export (we only clean up * directories in FSAL_PSEUDO filesystems). */ void cleanup_pseudofs_node(char *pseudopath, struct fsal_obj_handle *obj) { struct fsal_obj_handle *parent_obj; char *pos = pseudopath + strlen(pseudopath) - 1; char *name; fsal_status_t fsal_status; /* Strip trailing / from pseudopath */ while (*pos == '/') pos--; /* Replace first trailing / if any with NUL */ pos[1] = '\0'; /* Find the previous slash. * We will NEVER back up PAST the root, so no need to check * for walking off the beginning of the string. 
*/ while (*pos != '/') pos--; /* Remember the element name for remove */ name = pos + 1; LogDebug(COMPONENT_EXPORT, "Checking if pseudo node %s is needed", pseudopath); fsal_status = fsal_lookupp(obj, &parent_obj, NULL); if (FSAL_IS_ERROR(fsal_status)) { /* Truncate the pseudopath to be the path to the parent */ *pos = '\0'; LogCrit(COMPONENT_EXPORT, "Could not find cache entry for parent directory %s", pseudopath); return; } fsal_status = fsal_remove(parent_obj, name); if (FSAL_IS_ERROR(fsal_status)) { LogCrit(COMPONENT_EXPORT, "Removing pseudo node %s failed with %s", pseudopath, msg_fsal_err(fsal_status.major)); goto out; } /* Before recursing the check the parent, get export lock for looking at * exp_root_obj so we can check if we have reached the root of * the mounted on export. */ PTHREAD_RWLOCK_rdlock(&op_ctx->ctx_export->lock); if (parent_obj == op_ctx->ctx_export->exp_root_obj) { LogDebug(COMPONENT_EXPORT, "Reached root of PseudoFS %s", op_ctx->ctx_export->pseudopath); PTHREAD_RWLOCK_unlock(&op_ctx->ctx_export->lock); goto out; } PTHREAD_RWLOCK_unlock(&op_ctx->ctx_export->lock); /* Truncate the pseudopath to be the path to the parent */ *pos = '\0'; /* check if the parent directory is needed */ cleanup_pseudofs_node(pseudopath, parent_obj); out: parent_obj->obj_ops.put_ref(parent_obj); return; }
static bool name2id(const struct gsh_buffdesc *name, uint32_t *id, bool group, const uint32_t anon) { bool success; PTHREAD_RWLOCK_rdlock(group ? &idmapper_group_lock : &idmapper_user_lock); if (group) success = idmapper_lookup_by_gname(name, id); else success = idmapper_lookup_by_uname(name, id, NULL, false); PTHREAD_RWLOCK_unlock(group ? &idmapper_group_lock : &idmapper_user_lock); if (success) return true; else { gid_t gid; bool got_gid = false; /* Something we can mutate and count on as terminated */ char *namebuff = alloca(name->len + 1); char *at; bool looked_up = false; memcpy(namebuff, name->addr, name->len); *(namebuff + name->len) = '\0'; at = memchr(namebuff, '@', name->len); if (at == NULL) { if (pwentname2id (namebuff, name->len, id, anon, group, &gid, &got_gid, NULL)) looked_up = true; else if (atless2id(namebuff, name->len, id, anon)) looked_up = true; else return false; } else if (nfs_param.nfsv4_param.use_getpwnam) { looked_up = pwentname2id(namebuff, name->len, id, anon, group, &gid, &got_gid, at); } else { looked_up = idmapname2id(namebuff, name->len, id, anon, group, &gid, &got_gid, at); } if (!looked_up) { LogInfo(COMPONENT_IDMAPPER, "All lookups failed for %s, using anonymous.", namebuff); *id = anon; } PTHREAD_RWLOCK_wrlock(group ? &idmapper_group_lock : &idmapper_user_lock); if (group) success = idmapper_add_group(name, *id); else success = idmapper_add_user(name, *id, got_gid ? &gid : NULL, false); PTHREAD_RWLOCK_unlock(group ? &idmapper_group_lock : &idmapper_user_lock); if (!success) LogMajor(COMPONENT_IDMAPPER, "%s(%s %u) failed", (group ? "gidmap_add" : "uidmap_add"), namebuff, *id); return true; } }
/**
 * @brief XDR-encode the owner/group string for a numeric id.
 *
 * Encodes the cached name if present; otherwise resolves the id via
 * getpwuid_r/getgrgid_r or libnfsidmap (per configuration), falling
 * back to a numeric string or "nobody", then caches and encodes the
 * result.
 *
 * @param[in,out] xdrs  XDR stream to encode into
 * @param[in]     id    Numeric uid or gid
 * @param[in]     group true to encode a group name, false for an owner
 *
 * @return the result of inline_xdr_bytes (encoding success/failure).
 */
static bool xdr_encode_nfs4_princ(XDR *xdrs, uint32_t id, bool group)
{
	const struct gsh_buffdesc *found;
	uint32_t not_a_size_t;
	bool success = false;

	PTHREAD_RWLOCK_rdlock(group ? &idmapper_group_lock :
			      &idmapper_user_lock);
	if (group)
		success = idmapper_lookup_by_gid(id, &found);
	else
		success = idmapper_lookup_by_uid(id, &found, NULL);

	if (likely(success)) {
		not_a_size_t = found->len;

		/* Fully qualified owners are always stored in the hash
		   table, no matter what our lookup method. */
		success = inline_xdr_bytes(xdrs, (char **)&found->addr,
					   &not_a_size_t, UINT32_MAX);
		PTHREAD_RWLOCK_unlock(group ? &idmapper_group_lock :
				      &idmapper_user_lock);
		return success;
	} else {
		PTHREAD_RWLOCK_unlock(group ? &idmapper_group_lock :
				      &idmapper_user_lock);
		int rc;
		int size;
		bool looked_up = false;
		char *namebuff = NULL;
		struct gsh_buffdesc new_name;

		/* Size the stack buffer: pwent path needs the sysconf
		 * suggestion plus room for '@' + domain; idmap path is
		 * bounded by NFS4_MAX_DOMAIN_LEN. */
		if (nfs_param.nfsv4_param.use_getpwnam) {
			if (group)
				size = sysconf(_SC_GETGR_R_SIZE_MAX);
			else
				size = sysconf(_SC_GETPW_R_SIZE_MAX);
			if (size == -1)
				size = PWENT_BEST_GUESS_LEN;
			new_name.len = size;
			size += owner_domain.len + 2;
		} else {
			size = NFS4_MAX_DOMAIN_LEN + 2;
		}

		namebuff = alloca(size);

		new_name.addr = namebuff;

		if (nfs_param.nfsv4_param.use_getpwnam) {
			char *cursor;
			bool nulled;

			if (group) {
				struct group g;
				struct group *gres;

				rc = getgrgid_r(id, &g, namebuff,
						new_name.len, &gres);
				nulled = (gres == NULL);
			} else {
				struct passwd p;
				struct passwd *pres;

				rc = getpwuid_r(id, &p, namebuff,
						new_name.len, &pres);
				nulled = (pres == NULL);
			}

			if ((rc == 0) && !nulled) {
				/* Append "@<domain>" after the bare name
				 * (new_name is a counted buffer; no NUL is
				 * required). */
				new_name.len = strlen(namebuff);
				cursor = namebuff + new_name.len;
				*(cursor++) = '@';
				++new_name.len;
				memcpy(cursor, owner_domain.addr,
				       owner_domain.len);
				new_name.len += owner_domain.len;
				looked_up = true;
			} else {
				LogInfo(COMPONENT_IDMAPPER,
					"%s failed with code %d.",
					(group ? "getgrgid_r" :
					 "getpwuid_r"), rc);
			}
		} else {
#ifdef USE_NFSIDMAP
			if (group) {
				rc = nfs4_gid_to_name(id, owner_domain.addr,
						      namebuff,
						      NFS4_MAX_DOMAIN_LEN + 1);
			} else {
				rc = nfs4_uid_to_name(id, owner_domain.addr,
						      namebuff,
						      NFS4_MAX_DOMAIN_LEN + 1);
			}
			if (rc == 0) {
				new_name.len = strlen(namebuff);
				looked_up = true;
			} else {
				LogInfo(COMPONENT_IDMAPPER,
					"%s failed with code %d.",
					(group ? "nfs4_gid_to_name" :
					 "nfs4_uid_to_name"), rc);
			}
#else				/* USE_NFSIDMAP */
			looked_up = false;
#endif				/* !USE_NFSIDMAP */
		}

		if (!looked_up) {
			if (nfs_param.nfsv4_param.allow_numeric_owners) {
				LogInfo(COMPONENT_IDMAPPER,
					"Lookup for %d failed, using numeric %s",
					id, (group ? "group" : "owner"));
				/* 2**32 is 10 digits long in decimal */
				sprintf(namebuff, "%u", id);
				new_name.len = strlen(namebuff);
			} else {
				LogInfo(COMPONENT_IDMAPPER,
					"Lookup for %d failed, using nobody.",
					id);
				memcpy(new_name.addr, "nobody", 6);
				new_name.len = 6;
			}
		}

		/* Add to the cache and encode the result. */
		PTHREAD_RWLOCK_wrlock(group ? &idmapper_group_lock :
				      &idmapper_user_lock);
		if (group)
			success = idmapper_add_group(&new_name, id);
		else
			success = idmapper_add_user(&new_name, id, NULL,
						    false);
		PTHREAD_RWLOCK_unlock(group ? &idmapper_group_lock :
				      &idmapper_user_lock);
		if (unlikely(!success)) {
			/* Cache insert failure is non-fatal; still encode. */
			LogMajor(COMPONENT_IDMAPPER, "%s failed.",
				 group ? "idmapper_add_group" :
				 "idmaper_add_user");
		}
		not_a_size_t = new_name.len;
		return inline_xdr_bytes(xdrs, (char **)&new_name.addr,
					&not_a_size_t, UINT32_MAX);
	}
}
/**
 * @brief Perform a byte-range lock operation via fcntl.
 *
 * Maps an FSAL lock request (test/lock/unlock, read/write) onto POSIX
 * record locks (F_GETLK/F_SETLK) on the cached file descriptor.  On a
 * failed blocking-style lock, retrieves the conflicting lock details
 * when the caller asked for them.
 *
 * @param[in]  obj_hdl          File to lock
 * @param[in]  p_owner          Lock owner; non-NULL is not supported here
 * @param[in]  lock_op          FSAL_OP_LOCKT, FSAL_OP_LOCK or FSAL_OP_UNLOCK
 * @param[in]  request_lock     Requested lock parameters
 * @param[out] conflicting_lock Filled with the conflicting lock, if any
 *
 * @return fsalstat() with the mapped POSIX error.
 */
fsal_status_t vfs_lock_op(struct fsal_obj_handle *obj_hdl, void *p_owner,
			  fsal_lock_op_t lock_op,
			  fsal_lock_param_t *request_lock,
			  fsal_lock_param_t *conflicting_lock)
{
	struct vfs_fsal_obj_handle *myself;
	struct flock lock_args;
	int fcntl_comm;
	fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR;
	int retval = 0;

	myself = container_of(obj_hdl, struct vfs_fsal_obj_handle,
			      obj_handle);

	if (obj_hdl->fsal != obj_hdl->fs->fsal) {
		/* Handle belongs to another stacked FSAL; cannot operate. */
		LogDebug(COMPONENT_FSAL,
			 "FSAL %s operation for handle belonging to FSAL %s, return EXDEV",
			 obj_hdl->fsal->name, obj_hdl->fs->fsal->name);
		retval = EXDEV;
		fsal_error = posix2fsal_error(retval);
		return fsalstat(fsal_error, retval);
	}

	/* Take read lock on object to protect file descriptor. */
	PTHREAD_RWLOCK_rdlock(&obj_hdl->lock);

	if (myself->u.file.fd < 0 ||
	    myself->u.file.openflags == FSAL_O_CLOSED) {
		LogDebug(COMPONENT_FSAL,
			 "Attempting to lock with no file descriptor open");
		fsal_error = ERR_FSAL_FAULT;
		goto out;
	}

	if (p_owner != NULL) {
		/* Per-owner locks are not supported by this FSAL. */
		fsal_error = ERR_FSAL_NOTSUPP;
		goto out;
	}

	LogFullDebug(COMPONENT_FSAL,
		     "Locking: op:%d type:%d start:%" PRIu64 " length:%"
		     PRIu64,
		     lock_op, request_lock->lock_type,
		     request_lock->lock_start, request_lock->lock_length);

	/* Map the FSAL operation to the fcntl command: test -> F_GETLK,
	 * lock/unlock -> F_SETLK (non-blocking). */
	if (lock_op == FSAL_OP_LOCKT) {
		fcntl_comm = F_GETLK;
	} else if (lock_op == FSAL_OP_LOCK || lock_op == FSAL_OP_UNLOCK) {
		fcntl_comm = F_SETLK;
	} else {
		LogDebug(COMPONENT_FSAL,
			 "ERROR: Lock operation requested was not TEST, READ, or WRITE.");
		fsal_error = ERR_FSAL_NOTSUPP;
		goto out;
	}

	if (request_lock->lock_type == FSAL_LOCK_R) {
		lock_args.l_type = F_RDLCK;
	} else if (request_lock->lock_type == FSAL_LOCK_W) {
		lock_args.l_type = F_WRLCK;
	} else {
		LogDebug(COMPONENT_FSAL,
			 "ERROR: The requested lock type was not read or write.");
		fsal_error = ERR_FSAL_NOTSUPP;
		goto out;
	}

	/* Unlock overrides the read/write type chosen above. */
	if (lock_op == FSAL_OP_UNLOCK)
		lock_args.l_type = F_UNLCK;

	lock_args.l_len = request_lock->lock_length;
	lock_args.l_start = request_lock->lock_start;
	lock_args.l_whence = SEEK_SET;

	/* flock.l_len being signed long integer, larger lock ranges may
	 * get mapped to negative values. As per 'man 3 fcntl', posix
	 * locks can accept negative l_len values which may lead to
	 * unlocking an unintended range. Better bail out to prevent that.
	 */
	if (lock_args.l_len < 0) {
		LogCrit(COMPONENT_FSAL,
			"The requested lock length is out of range- lock_args.l_len(%ld), request_lock_length(%"
			PRIu64 ")",
			lock_args.l_len, request_lock->lock_length);
		fsal_error = ERR_FSAL_BAD_RANGE;
		goto out;
	}

	errno = 0;
	retval = fcntl(myself->u.file.fd, fcntl_comm, &lock_args);

	if (retval && lock_op == FSAL_OP_LOCK) {
		retval = errno;
		if (conflicting_lock != NULL) {
			/* The lock failed; ask the kernel who holds the
			 * conflicting lock so we can report it. */
			fcntl_comm = F_GETLK;
			if (fcntl(myself->u.file.fd, fcntl_comm,
				  &lock_args)) {
				retval = errno;	/* we lose the inital error */
				LogCrit(COMPONENT_FSAL,
					"After failing a lock request, I couldn't even get the details of who owns the lock.");
				fsal_error = posix2fsal_error(retval);
				goto out;
			}

			/* NOTE(review): this inner null-check is redundant —
			 * conflicting_lock was already tested above. */
			if (conflicting_lock != NULL) {
				conflicting_lock->lock_length =
				    lock_args.l_len;
				conflicting_lock->lock_start =
				    lock_args.l_start;
				conflicting_lock->lock_type =
				    lock_args.l_type;
			}
		}
		fsal_error = posix2fsal_error(retval);
		goto out;
	}

	/* F_UNLCK is returned then the tested operation would be
	   possible. */
	if (conflicting_lock != NULL) {
		if (lock_op == FSAL_OP_LOCKT && lock_args.l_type != F_UNLCK) {
			conflicting_lock->lock_length = lock_args.l_len;
			conflicting_lock->lock_start = lock_args.l_start;
			conflicting_lock->lock_type = lock_args.l_type;
		} else {
			conflicting_lock->lock_length = 0;
			conflicting_lock->lock_start = 0;
			conflicting_lock->lock_type = FSAL_NO_LOCK;
		}
	}

out:
	PTHREAD_RWLOCK_unlock(&obj_hdl->lock);

	return fsalstat(fsal_error, retval);
}
/**
 * @brief NFS4_OP_LOOKUPP
 *
 * This function implements the NFS4_OP_LOOKUPP operation, which looks
 * up the parent of the supplied directory.  When the current filehandle
 * is the root of an export, the operation first crosses the reverse
 * junction into the parent export (swapping the export context in
 * op_ctx) before doing the parent lookup.
 *
 * @param[in]     op    Arguments for nfs4_op
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  Results for nfs4_op
 *
 * @return per RFC5661, p. 369
 *
 */
int nfs4_op_lookupp(struct nfs_argop4 *op, compound_data_t *data,
		    struct nfs_resop4 *resp)
{
	LOOKUPP4res * const res_LOOKUPP4 = &resp->nfs_resop4_u.oplookupp;
	struct fsal_obj_handle *dir_obj = NULL;
	struct fsal_obj_handle *file_obj;
	struct fsal_obj_handle *root_obj;
	fsal_status_t status;
	struct gsh_export *original_export = op_ctx->ctx_export;

	resp->resop = NFS4_OP_LOOKUPP;
	res_LOOKUPP4->status = NFS4_OK;

	/* Do basic checks on a filehandle */
	res_LOOKUPP4->status = nfs4_sanity_check_FH(data, DIRECTORY, false);

	if (res_LOOKUPP4->status != NFS4_OK)
		return res_LOOKUPP4->status;

	/* Preparing for cache_inode_lookup ".." */
	file_obj = NULL;
	dir_obj = data->current_obj;

	/* If Filehandle points to the root of the current export, then backup
	 * through junction into the containing export.
	 */
	if (data->current_obj->type != DIRECTORY)
		goto not_junction;

	PTHREAD_RWLOCK_rdlock(&original_export->lock);

	/* Takes a reference on root_obj; put back below (or on error
	 * return paths, not needed since we return before reaching it —
	 * NOTE(review): early returns in the junction block do not put
	 * root_obj; confirm whether that leaks a reference). */
	status = nfs_export_get_root_entry(original_export, &root_obj);

	if (FSAL_IS_ERROR(status)) {
		res_LOOKUPP4->status = nfs4_Errno_status(status);
		PTHREAD_RWLOCK_unlock(&original_export->lock);
		return res_LOOKUPP4->status;
	}

	if (data->current_obj == root_obj) {
		struct gsh_export *parent_exp = NULL;

		/* Handle reverse junction */
		LogDebug(COMPONENT_EXPORT,
			 "Handling reverse junction from Export_Id %d Path %s Parent=%p",
			 original_export->export_id,
			 original_export->fullpath,
			 original_export->exp_parent_exp);

		if (original_export->exp_parent_exp == NULL) {
			/* lookupp on the root on the pseudofs should return
			 * NFS4ERR_NOENT (RFC3530, page 166) */
			PTHREAD_RWLOCK_unlock(&original_export->lock);
			res_LOOKUPP4->status = NFS4ERR_NOENT;
			return res_LOOKUPP4->status;
		}

		PTHREAD_RWLOCK_unlock(&original_export->lock);

		/* Clear out data->current entry outside lock
		 * so if it cascades into cleanup, we aren't holding
		 * an export lock that would cause trouble. */
		set_current_entry(data, NULL);

		/* We need to protect accessing the parent information
		 * with the export lock. We use the current export's lock
		 * which is plenty, the parent can't go away without
		 * grabbing the current export's lock to clean out the
		 * parent information. */
		PTHREAD_RWLOCK_rdlock(&original_export->lock);

		/* Get the junction inode into dir_obj and parent_exp
		 * for reference. */
		dir_obj = original_export->exp_junction_obj;
		parent_exp = original_export->exp_parent_exp;

		/* Check if there is a problem with the export and try and
		 * get a reference to the parent export.
		 */
		if (dir_obj == NULL || parent_exp == NULL
		    || !export_ready(parent_exp)) {
			/* Export is in the process of dying */
			PTHREAD_RWLOCK_unlock(&original_export->lock);
			LogCrit(COMPONENT_EXPORT,
				"Reverse junction from Export_Id %d Path %s Parent=%p is stale",
				original_export->export_id,
				original_export->fullpath,
				parent_exp);
			res_LOOKUPP4->status = NFS4ERR_STALE;
			return res_LOOKUPP4->status;
		}

		get_gsh_export_ref(parent_exp);

		dir_obj->obj_ops.get_ref(dir_obj);

		/* Set up dir_obj as current obj with an LRU reference
		 * while still holding the lock. */
		set_current_entry(data, dir_obj);

		/* Put our ref */
		dir_obj->obj_ops.put_ref(dir_obj);

		/* Stash parent export in opctx while still holding the
		 * lock. */
		op_ctx->ctx_export = parent_exp;
		op_ctx->fsal_export = op_ctx->ctx_export->fsal_export;

		/* Now we are safely transitioned to the parent export and can
		 * release the lock. */
		PTHREAD_RWLOCK_unlock(&original_export->lock);

		/* Release old export reference that was held by opctx. */
		put_gsh_export(original_export);

		/* Build credentials */
		res_LOOKUPP4->status = nfs4_export_check_access(data->req);

		/* Test for access error (export should not be visible). */
		if (res_LOOKUPP4->status == NFS4ERR_ACCESS) {
			/* If return is NFS4ERR_ACCESS then this client doesn't
			 * have access to this export, return NFS4ERR_NOENT to
			 * hide it. It was not visible in READDIR response.
			 */
			LogDebug(COMPONENT_EXPORT,
				 "NFS4ERR_ACCESS Hiding Export_Id %d Path %s with NFS4ERR_NOENT",
				 parent_exp->export_id,
				 parent_exp->fullpath);
			res_LOOKUPP4->status = NFS4ERR_NOENT;
			return res_LOOKUPP4->status;
		}
	} else {
		/* Release the lock taken above */
		PTHREAD_RWLOCK_unlock(&original_export->lock);
	}

	/* Return our ref from above */
	root_obj->obj_ops.put_ref(root_obj);

not_junction:
	status = fsal_lookupp(dir_obj, &file_obj, NULL);

	if (file_obj != NULL) {
		/* Convert it to a file handle */
		if (!nfs4_FSALToFhandle(false, &data->currentFH, file_obj,
					op_ctx->ctx_export)) {
			res_LOOKUPP4->status = NFS4ERR_SERVERFAULT;
			file_obj->obj_ops.put_ref(file_obj);
			return res_LOOKUPP4->status;
		}

		/* Keep the pointer within the compound data */
		set_current_entry(data, file_obj);

		/* Put our ref */
		file_obj->obj_ops.put_ref(file_obj);

		/* Return successfully */
		res_LOOKUPP4->status = NFS4_OK;
	} else {
		/* Unable to look up parent for some reason.
		 * Return error.
		 */
		set_current_entry(data, NULL);
		res_LOOKUPP4->status = nfs4_Errno_status(status);
	}

	return res_LOOKUPP4->status;
}				/* nfs4_op_lookupp */
/**
 * @brief Look up an entry, latching the table
 *
 * This function looks up an entry in the hash table and latches the
 * partition in which that entry would belong in preparation for other
 * activities.  This function is a primitive and is intended more for
 * use building other access functions than for client code itself.
 *
 * @brief[in] ht The hash table to search
 * @brief[in] key The key for which to search
 * @brief[out] val The value found
 * @brief[in] may_write This must be true if the followup call might
 * mutate the hash table (set or delete)
 * @brief[out] latch Opaque structure holding information on the
 * table.
 *
 * @retval HASHTABLE_SUCCESS The entry was found, the table is
 * latched.
 * @retval HASHTABLE_ERROR_NO_SUCH_KEY The entry was not found, the
 * table is latched.  (Corrected: the comment previously said
 * HASHTABLE_ERROR_NOT_FOUND, but the code below latches on
 * HASHTABLE_ERROR_NO_SUCH_KEY.)
 * @retval Others, failure, the table is not latched.
 */
hash_error_t hashtable_getlatch(struct hash_table *ht,
				const struct gsh_buffdesc *key,
				struct gsh_buffdesc *val, bool may_write,
				struct hash_latch *latch)
{
	/* The index specifying the partition to search */
	uint32_t index = 0;
	/* The node found for the key */
	struct rbt_node *locator = NULL;
	/* The buffer descriptors for the key and value for the found entry */
	struct hash_data *data = NULL;
	/* The hash value to be searched for within the Red-Black tree */
	uint64_t rbt_hash = 0;
	/* Stored error return */
	hash_error_t rc = HASHTABLE_SUCCESS;

	/* This combination of options makes no sense ever */
	assert(!(may_write && !latch));

	rc = compute(ht, key, &index, &rbt_hash);
	if (rc != HASHTABLE_SUCCESS)
		return rc;

	/* Acquire mutex: writer lock when the follow-up may mutate,
	 * reader lock otherwise. */
	if (may_write)
		PTHREAD_RWLOCK_wrlock(&(ht->partitions[index].lock));
	else
		PTHREAD_RWLOCK_rdlock(&(ht->partitions[index].lock));

	rc = key_locate(ht, key, index, rbt_hash, &locator);

	if (rc == HASHTABLE_SUCCESS) {
		/* Key was found */
		data = RBT_OPAQ(locator);
		if (val) {
			val->addr = data->val.addr;
			val->len = data->val.len;
		}

		if (isDebug(COMPONENT_HASHTABLE)
		    && isFullDebug(ht->parameter.ht_log_component)) {
			char dispval[HASHTABLE_DISPLAY_STRLEN];

			if (ht->parameter.val_to_str != NULL)
				ht->parameter.val_to_str(&data->val, dispval);
			else
				dispval[0] = '\0';

			LogFullDebug(ht->parameter.ht_log_component,
				     "Get %s returning Value=%p {%s}",
				     ht->parameter.ht_name, data->val.addr,
				     dispval);
		}
	}

	/* Latch (keep the partition locked) on success or on a clean
	 * miss when the caller supplied a latch; otherwise drop the
	 * lock before returning. */
	if (((rc == HASHTABLE_SUCCESS)
	     || (rc == HASHTABLE_ERROR_NO_SUCH_KEY)) && (latch != NULL)) {
		latch->index = index;
		latch->rbt_hash = rbt_hash;
		latch->locator = locator;
	} else {
		PTHREAD_RWLOCK_unlock(&ht->partitions[index].lock);
	}

	if (rc != HASHTABLE_SUCCESS && isDebug(COMPONENT_HASHTABLE)
	    && isFullDebug(ht->parameter.ht_log_component))
		LogFullDebug(ht->parameter.ht_log_component,
			     "Get %s returning failure %s",
			     ht->parameter.ht_name,
			     hash_table_err_to_str(rc));

	return rc;
}
/**
 * @brief Convert a principal (as returned by @c gss_display_name) to a UID
 *
 * Consults the idmapper cache first; on a miss, maps host-based
 * principals (nfs/, root/, host/) to root, otherwise resolves via
 * nfs4_gss_princ_to_ids (with an optional winbind/PAC fallback when
 * built with _MSPAC_SUPPORT) and caches the result.
 *
 * NOTE(review): the matching #ifdef for the #endif below is above this
 * view — with _MSPAC_SUPPORT an alternate signature taking the gss
 * data (the "gd" used below) is presumably declared there; confirm.
 *
 * @param[in]  name The principal of the user
 * @param[out] uid  The resulting UID
 * @param[out] gid  The resulting GID
 *
 * @return true if successful, false otherwise
 */
bool principal2uid(char *principal, uid_t *uid, gid_t *gid)
#endif
{
#ifdef USE_NFSIDMAP
	uid_t gss_uid = ANON_UID;
	gid_t gss_gid = ANON_GID;
	const gid_t *gss_gidres = NULL;
	int rc;
	bool success;
	struct gsh_buffdesc princbuff = {
		.addr = principal,
		.len = strlen(principal)
	};
#endif

	if (nfs_param.nfsv4_param.use_getpwnam)
		return false;

#ifdef USE_NFSIDMAP
	/* Fast path: look the principal up in the idmapper cache. */
	PTHREAD_RWLOCK_rdlock(&idmapper_user_lock);
	success =
	    idmapper_lookup_by_uname(&princbuff, &gss_uid, &gss_gidres, true);
	if (success && gss_gidres)
		gss_gid = *gss_gidres;
	PTHREAD_RWLOCK_unlock(&idmapper_user_lock);

	if (unlikely(!success)) {
		if ((princbuff.len >= 4)
		    && (!memcmp(princbuff.addr, "nfs/", 4)
			|| !memcmp(princbuff.addr, "root/", 5)
			|| !memcmp(princbuff.addr, "host/", 5))) {
			/* NFSv4 specific features: RPCSEC_GSS will
			 * provide user like
			 *
			 * nfs/<host>
			 * root/<host>
			 * host/<host>
			 * choice is made to map them to root */
			/* This is a "root" request made from the
			   hostbased nfs principal, use root */
			*uid = 0;
			return true;
		}
		/* nfs4_gss_princ_to_ids required to extract uid/gid from
		   gss creds */
		rc = nfs4_gss_princ_to_ids("krb5", principal, &gss_uid,
					   &gss_gid);
		if (rc) {
#ifdef _MSPAC_SUPPORT
			bool found_uid = false;
			bool found_gid = false;

			if (gd->flags & SVC_RPC_GSS_FLAG_MSPAC) {
				/* Fall back to resolving the ids from the
				 * MS-PAC via winbind. */
				struct wbcAuthUserParams params;
				wbcErr wbc_err;
				struct wbcAuthUserInfo *info;
				struct wbcAuthErrorInfo *error = NULL;

				memset(&params, 0, sizeof(params));
				params.level = WBC_AUTH_USER_LEVEL_PAC;
				params.password.pac.data =
				    (uint8_t *) gd->pac.ms_pac.value;
				params.password.pac.length =
				    gd->pac.ms_pac.length;

				wbc_err =
				    wbcAuthenticateUserEx(&params, &info,
							  &error);
				if (!WBC_ERROR_IS_OK(wbc_err)) {
					LogCrit(COMPONENT_IDMAPPER,
						"wbcAuthenticateUserEx returned %s",
						wbcErrorString(wbc_err));
					return false;
				}

				if (error) {
					LogCrit(COMPONENT_IDMAPPER,
						"nt_status: %s, display_string %s",
						error->nt_string,
						error->display_string);
					wbcFreeMemory(error);
					return false;
				}

				/* 1st SID is account sid, see wbclient.h */
				wbc_err =
				    wbcSidToUid(&info->sids[0].sid, &gss_uid);
				if (!WBC_ERROR_IS_OK(wbc_err)) {
					LogCrit(COMPONENT_IDMAPPER,
						"wbcSidToUid for uid returned %s",
						wbcErrorString(wbc_err));
					wbcFreeMemory(info);
					return false;
				}

				/* 2nd SID is primary_group sid, see
				   wbclient.h */
				wbc_err =
				    wbcSidToGid(&info->sids[1].sid, &gss_gid);
				if (!WBC_ERROR_IS_OK(wbc_err)) {
					LogCrit(COMPONENT_IDMAPPER,
						"wbcSidToUid for gid returned %s\n",
						wbcErrorString(wbc_err));
					wbcFreeMemory(info);
					return false;
				}
				wbcFreeMemory(info);
				found_uid = true;
				found_gid = true;
			}
#endif				/* _MSPAC_SUPPORT */
#ifdef _MSPAC_SUPPORT
			if ((found_uid == true) && (found_gid == true))
				goto principal_found;
#endif

			return false;
		}
#ifdef _MSPAC_SUPPORT
 principal_found:
#endif
		/* Cache the resolved principal; failure is non-fatal. */
		PTHREAD_RWLOCK_wrlock(&idmapper_user_lock);
		success =
		    idmapper_add_user(&princbuff, gss_uid, &gss_gid, true);
		PTHREAD_RWLOCK_unlock(&idmapper_user_lock);

		if (!success) {
			LogMajor(COMPONENT_IDMAPPER,
				 "idmapper_add_user(%s, %d, %d) failed",
				 principal, gss_uid, gss_gid);
		}
	}

	*uid = gss_uid;
	*gid = gss_gid;

	return true;
#else				/* !USE_NFSIDMAP */
	assert(!"prohibited by configuration");
	return false;
#endif
}
/**
 * @brief Read a cached directory and stream entries to a callback.
 *
 * Checks permission to list the directory (and, if attributes were
 * requested, to read them), populates the dirent cache if it is not
 * trusted/populated, seeks to @a cookie in the per-directory cookie AVL
 * tree, and then invokes @a cb once per entry until either the cached
 * sequence is exhausted or the callback stops accepting results.
 *
 * @param[in]  directory The directory cache entry to be read
 * @param[in]  cookie    Starting cookie: 0 for the beginning, otherwise a
 *                       cookie previously handed out (values 1 and 2 are
 *                       reserved and rejected as bad cookies)
 * @param[out] nbfound   Number of entries delivered to the callback
 * @param[out] eod_met   Set true when the end of the directory was reached
 * @param[in]  attrmask  Attributes the caller wants for each entry; 0 means
 *                       no attribute permission check is needed
 * @param[in]  cb        Per-entry callback
 * @param[in]  opaque    Opaque pointer passed through to the callback
 *
 * @return CACHE_INODE_SUCCESS, CACHE_INODE_NOT_A_DIRECTORY,
 *         CACHE_INODE_BAD_COOKIE, or the first error raised by lookup,
 *         populate, or getattr.
 */
cache_inode_status_t cache_inode_readdir(cache_entry_t *directory,
					 uint64_t cookie,
					 unsigned int *nbfound,
					 bool *eod_met,
					 attrmask_t attrmask,
					 cache_inode_getattr_cb_t cb,
					 void *opaque)
{
	/* The entry being examined */
	cache_inode_dir_entry_t *dirent = NULL;
	/* The node in the tree being traversed */
	struct avltree_node *dirent_node;
	/* The access mask corresponding to permission to list directory
	   entries */
	fsal_accessflags_t access_mask =
	    (FSAL_MODE_MASK_SET(FSAL_R_OK) |
	     FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_LIST_DIR));
	/* Stricter mask used only when per-entry attributes were requested */
	fsal_accessflags_t access_mask_attr =
	    (FSAL_MODE_MASK_SET(FSAL_R_OK) | FSAL_MODE_MASK_SET(FSAL_X_OK) |
	     FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_LIST_DIR) |
	     FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_EXECUTE));
	cache_inode_status_t status = CACHE_INODE_SUCCESS;
	cache_inode_status_t attr_status;
	struct cache_inode_readdir_cb_parms cb_parms = { opaque, NULL,
							 true, 0, true };
	/* Allows a single ESTALE retry for the current dirent; reset for
	 * each new dirent at the top of the main loop. */
	bool retry_stale = true;

	LogFullDebug(COMPONENT_NFS_READDIR, "Enter....");

	/* readdir can be done only with a directory */
	if (directory->type != DIRECTORY) {
		status = CACHE_INODE_NOT_A_DIRECTORY;
		/* no lock acquired so far, just return status */
		LogFullDebug(COMPONENT_NFS_READDIR, "Not a directory");
		return status;
	}

	/* cache_inode_lock_trust_attrs can return an error, and no lock will
	 * be acquired */
	status = cache_inode_lock_trust_attrs(directory, false);
	if (status != CACHE_INODE_SUCCESS) {
		LogDebug(COMPONENT_NFS_READDIR,
			 "cache_inode_lock_trust_attrs status=%s",
			 cache_inode_err_str(status));
		return status;
	}

	/* Adjust access mask if ACL is asked for.
	 * NOTE: We intentionally do NOT check ACE4_READ_ATTR.
	 */
	if ((attrmask & ATTR_ACL) != 0) {
		access_mask |= FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_READ_ACL);
		access_mask_attr |= FSAL_ACE4_MASK_SET(FSAL_ACE_PERM_READ_ACL);
	}

	/* Check if user (as specified by the credentials) is authorized to
	 * read the directory or not */
	status = cache_inode_access_no_mutex(directory, access_mask);
	if (status != CACHE_INODE_SUCCESS) {
		LogFullDebug(COMPONENT_NFS_READDIR,
			     "permission check for directory status=%s",
			     cache_inode_err_str(status));
		PTHREAD_RWLOCK_unlock(&directory->attr_lock);
		return status;
	}

	if (attrmask != 0) {
		/* Check for access permission to get attributes.  A failure
		 * here is not fatal; it only disables attributes in the
		 * callback (cb_parms.attr_allowed below). */
		attr_status = cache_inode_access_no_mutex(directory,
							  access_mask_attr);
		if (attr_status != CACHE_INODE_SUCCESS) {
			LogFullDebug(COMPONENT_NFS_READDIR,
				     "permission check for attributes "
				     "status=%s",
				     cache_inode_err_str(attr_status));
		}
	} else
		/* No attributes requested, we don't need permission */
		attr_status = CACHE_INODE_SUCCESS;

	/* Lock order: take the content lock before dropping the attr lock so
	 * the directory is never observed unlocked. */
	PTHREAD_RWLOCK_rdlock(&directory->content_lock);
	PTHREAD_RWLOCK_unlock(&directory->attr_lock);
	if (!
	    ((directory->flags & CACHE_INODE_TRUST_CONTENT)
	     && (directory->flags & CACHE_INODE_DIR_POPULATED))) {
		/* NOTE(review): there is a window between the unlock and the
		 * wrlock in which another thread may already have populated
		 * the directory; the flags are not re-checked, so populate is
		 * presumably safe to call redundantly -- confirm. */
		PTHREAD_RWLOCK_unlock(&directory->content_lock);
		PTHREAD_RWLOCK_wrlock(&directory->content_lock);
		status = cache_inode_readdir_populate(directory);
		if (status != CACHE_INODE_SUCCESS) {
			LogFullDebug(COMPONENT_NFS_READDIR,
				     "cache_inode_readdir_populate status=%s",
				     cache_inode_err_str(status));
			goto unlock_dir;
		}
	}

	/* deal with initial cookie value:
	 * 1. cookie is invalid (-should- be checked by caller)
	 * 2. cookie is 0 (first cookie) -- ok
	 * 3. cookie is > than highest dirent position (error)
	 * 4. cookie <= highest dirent position but > highest cached cookie
	 *    (currently equivalent to #2, because we pre-populate the cookie
	 *    avl)
	 * 5. cookie is in cached range -- ok */

	if (cookie > 0) {
		/* N.B., cache_inode_avl_qp_insert_s ensures k > 2 */
		if (cookie < 3) {
			status = CACHE_INODE_BAD_COOKIE;
			LogFullDebug(COMPONENT_NFS_READDIR, "Bad cookie");
			goto unlock_dir;
		}

		/* we assert this can now succeed */
		dirent = cache_inode_avl_lookup_k(directory, cookie,
						  CACHE_INODE_FLAG_NEXT_ACTIVE);
		if (!dirent) {
			/* Linux (3.4, etc) has been observed to send readdir
			 * at the offset of the last entry's cookie, and
			 * returns no dirents to userland if that readdir
			 * notfound or badcookie. */
			if (cache_inode_avl_lookup_k
			    (directory, cookie, CACHE_INODE_FLAG_NONE)) {
				/* yup, it was the last entry */
				LogFullDebug(COMPONENT_NFS_READDIR,
					     "EOD because empty result");
				*eod_met = true;
				goto unlock_dir;
			}
			LogFullDebug(COMPONENT_NFS_READDIR,
				     "seek to cookie=%" PRIu64 " fail",
				     cookie);
			status = CACHE_INODE_BAD_COOKIE;
			goto unlock_dir;
		}

		/* dirent is the NEXT entry to return, since we sent
		 * CACHE_INODE_FLAG_NEXT_ACTIVE */
		dirent_node = &dirent->node_hk;

	} else {
		/* initial readdir */
		dirent_node = avltree_first(&directory->object.dir.avl.t);
	}

	LogFullDebug(COMPONENT_NFS_READDIR,
		     "About to readdir in cache_inode_readdir: directory=%p "
		     "cookie=%" PRIu64 " collisions %d", directory, cookie,
		     directory->object.dir.avl.collisions);

	/* Now satisfy the request from the cached readdir--stop when either
	 * the requested sequence or dirent sequence is exhausted */
	*nbfound = 0;
	*eod_met = false;

	for (; cb_parms.in_result && dirent_node;
	     dirent_node = avltree_next(dirent_node)) {
		cache_entry_t *entry = NULL;
		cache_inode_status_t tmp_status = 0;

		dirent = avltree_container_of(dirent_node,
					      cache_inode_dir_entry_t,
					      node_hk);

		/* Re-arm the single-retry budget for this dirent (the
		 * in-line comments promise "one retry per dirent"; without
		 * this reset the budget was one retry per readdir call). */
		retry_stale = true;

 estale_retry:
		LogFullDebug(COMPONENT_NFS_READDIR, "Lookup direct %s",
			     dirent->name);

		entry = cache_inode_get_keyed(&dirent->ckey,
					      CIG_KEYED_FLAG_NONE,
					      &tmp_status);
		if (!entry) {
			LogFullDebug(COMPONENT_NFS_READDIR,
				     "Lookup returned %s",
				     cache_inode_err_str(tmp_status));

			if (retry_stale
			    && tmp_status == CACHE_INODE_ESTALE) {
				LogDebug(COMPONENT_NFS_READDIR,
					 "cache_inode_get_keyed returned %s "
					 "for %s - retrying entry",
					 cache_inode_err_str(tmp_status),
					 dirent->name);
				retry_stale = false; /* only one retry per
						      * dirent */
				goto estale_retry;
			}

			if (tmp_status == CACHE_INODE_NOT_FOUND
			    || tmp_status == CACHE_INODE_ESTALE) {
				/* Directory changed out from under us.
				   Invalidate it, skip the name, and keep
				   going. */
				atomic_clear_uint32_t_bits(
					&directory->flags,
					CACHE_INODE_TRUST_CONTENT);
				LogDebug(COMPONENT_NFS_READDIR,
					 "cache_inode_get_keyed returned %s "
					 "for %s - skipping entry",
					 cache_inode_err_str(tmp_status),
					 dirent->name);
				continue;
			} else {
				/* Something is more seriously wrong,
				   probably an inconsistency. */
				status = tmp_status;
				LogCrit(COMPONENT_NFS_READDIR,
					"cache_inode_get_keyed returned %s "
					"for %s - bailing out",
					cache_inode_err_str(status),
					dirent->name);
				goto unlock_dir;
			}
		}

		LogFullDebug(COMPONENT_NFS_READDIR,
			     "cache_inode_readdir: dirent=%p name=%s "
			     "cookie=%" PRIu64 " (probes %d)", dirent,
			     dirent->name, dirent->hk.k, dirent->hk.p);

		cb_parms.name = dirent->name;
		cb_parms.attr_allowed = attr_status == CACHE_INODE_SUCCESS;
		cb_parms.cookie = dirent->hk.k;

		/* Deliver the entry (and attributes, if allowed) to the
		 * caller's callback. */
		tmp_status = cache_inode_getattr(entry, &cb_parms, cb,
						 CB_ORIGINAL);

		if (tmp_status != CACHE_INODE_SUCCESS) {
			/* Drop the ref taken by cache_inode_get_keyed before
			 * retrying, skipping, or bailing. */
			cache_inode_lru_unref(entry, LRU_FLAG_NONE);
			if (tmp_status == CACHE_INODE_ESTALE) {
				if (retry_stale) {
					LogDebug(COMPONENT_NFS_READDIR,
						 "cache_inode_getattr returned "
						 "%s for %s - retrying entry",
						 cache_inode_err_str
						 (tmp_status), dirent->name);
					retry_stale = false; /* only one retry
							      * per dirent */
					goto estale_retry;
				}

				/* Directory changed out from under us.
				   Invalidate it, skip the name, and keep
				   going. */
				atomic_clear_uint32_t_bits(
					&directory->flags,
					CACHE_INODE_TRUST_CONTENT);

				LogDebug(COMPONENT_NFS_READDIR,
					 "cache_inode_getattr "
					 "returned %s for %s - skipping entry",
					 cache_inode_err_str(tmp_status),
					 dirent->name);
				continue;
			}

			status = tmp_status;

			LogCrit(COMPONENT_NFS_READDIR,
				"cache_inode_getattr returned %s for "
				"%s - bailing out",
				cache_inode_err_str(status), dirent->name);

			goto unlock_dir;
		}

		(*nbfound)++;

		cache_inode_lru_unref(entry, LRU_FLAG_NONE);

		if (!cb_parms.in_result) {
			LogDebug(COMPONENT_NFS_READDIR,
				 "bailing out due to entry not in result");
			break;
		}
	}

	/* We have reached the last node and every node traversed was
	   added to the result */
	LogDebug(COMPONENT_NFS_READDIR,
		 "dirent_node = %p, nbfound = %u, in_result = %s", dirent_node,
		 *nbfound, cb_parms.in_result ? "TRUE" : "FALSE");

	if (!dirent_node && cb_parms.in_result)
		*eod_met = true;
	else
		*eod_met = false;

unlock_dir:
	PTHREAD_RWLOCK_unlock(&directory->content_lock);
	return status;
}				/* cache_inode_readdir */