/*
 * Repoint ip->chain to nchain.  Caller must hold the inode exclusively
 * locked.
 *
 * The new chain (if not NULL) gains a ref and the displaced chain is
 * dropped.  When a non-NULL pip is supplied, ip->pip is retargeted the
 * same way.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
		      hammer2_chain_t *nchain)
{
	hammer2_chain_t *prev_chain;
	hammer2_inode_t *prev_pip;

	/*
	 * Swap in the new chain.  Ref the replacement before dropping
	 * the old one so the inode never points at an unreferenced chain.
	 */
	if (nchain)
		hammer2_chain_ref(nchain);
	prev_chain = ip->chain;
	ip->chain = nchain;
	if (prev_chain)
		hammer2_chain_drop(prev_chain);

	/*
	 * Retarget the parent-inode pointer only when requested
	 * (pip != NULL) and it actually changes.
	 */
	if (pip != NULL && ip->pip != pip) {
		hammer2_inode_ref(pip);
		prev_pip = ip->pip;
		ip->pip = pip;
		if (prev_pip)
			hammer2_inode_drop(prev_pip);
	}
}
/*
 * The caller presents a locked *chainp pointing to a HAMMER2_BREF_TYPE_INODE
 * with an obj_type of HAMMER2_OBJTYPE_HARDLINK.  This routine will gobble
 * the *chainp and return a new locked *chainp representing the file target
 * (the original *chainp will be unlocked).
 *
 * When a match is found the chain representing the original HARDLINK
 * will be returned in *ochainp with a ref, but not locked.
 *
 * When no match is found *chainp is set to NULL and EIO is returned.
 * (*ochainp) will still be set to the original chain with a ref but not
 * locked.
 */
int
hammer2_hardlink_find(hammer2_inode_t *dip, hammer2_chain_t **chainp,
		      hammer2_chain_t **ochainp)
{
	hammer2_chain_t *chain = *chainp;
	hammer2_chain_t *parent;
	hammer2_inode_t *ip;
	hammer2_inode_t *pip;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	int cache_index = -1;

	pip = dip;
	hammer2_inode_ref(pip);		/* for loop */
	hammer2_chain_ref(chain);	/* for (*ochainp) */
	*ochainp = chain;

	/*
	 * Locate the hardlink target.  pip is referenced and not locked.
	 * The hidden target inode is keyed on the hardlink's inode number
	 * (lhc) and lives in a parent directory common to all referencing
	 * directory entries, so walk up the pip chain looking for it.
	 *
	 * The original chain is no longer needed once lhc has been
	 * extracted; the chain variable is reused for the lookup result.
	 */
	lhc = chain->data->ipdata.inum;
	hammer2_chain_unlock(chain);
	chain = NULL;

	while ((ip = pip) != NULL) {
		parent = hammer2_inode_lock_ex(ip);
		hammer2_inode_drop(ip);			/* loop */
		KKASSERT(parent->bref.type == HAMMER2_BREF_TYPE_INODE);
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lhc, lhc, &cache_index, 0);
		hammer2_chain_lookup_done(parent);	/* discard parent */
		if (chain)
			break;
		pip = ip->pip;		/* safe, ip held locked */
		if (pip)
			hammer2_inode_ref(pip);		/* loop */
		hammer2_inode_unlock_ex(ip, NULL);
	}

	/*
	 * chain is locked, ip is locked.  Unlock ip, return the locked
	 * chain.  *ochainp is already set w/a ref count and not locked.
	 *
	 * (parent is already unlocked).
	 */
	if (ip)
		hammer2_inode_unlock_ex(ip, NULL);
	*chainp = chain;
	if (chain) {
		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
		/* already locked */
		return (0);
	} else {
		/* target not found anywhere along the parent path */
		return (EIO);
	}
}
/*
 * Release a shared inode lock.  The optional chain returned by the
 * matching lock call is unlocked first, then the topology CST lock is
 * released and the ref the lock held on the inode is dropped.
 */
void
hammer2_inode_unlock_sh(hammer2_inode_t *ip, hammer2_chain_t *chain)
{
	if (chain != NULL)
		hammer2_chain_unlock(chain);
	ccms_thread_unlock(&ip->topo_cst);
	hammer2_inode_drop(ip);
}
/*
 * Release an exclusive inode lock.  Unlocks the optional chain that was
 * returned by the matching lock call, releases the topology CST lock,
 * and drops the ref the lock held on the inode.
 *
 * XXX this will catch parent directories too which we don't
 * really want.
 */
void
hammer2_inode_unlock_ex(hammer2_inode_t *ip, hammer2_chain_t *chain)
{
	if (chain != NULL)
		hammer2_chain_unlock(chain);
	ccms_thread_unlock(&ip->topo_cst);
	hammer2_inode_drop(ip);
}
/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 *
 * The wakeup flag is sampled and cleared while we still hold ip->lock;
 * the wakeup itself is issued after the mutex is released so the woken
 * thread can immediately acquire it.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	int dowakeup;

	dowakeup = (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) != 0;
	if (dowakeup)
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
	hammer2_mtx_unlock(&ip->lock);
	if (dowakeup)
		wakeup(&ip->flags);
	hammer2_inode_drop(ip);
}
/*
 * Release an exclusive inode lock, propagating chain modification state
 * to the inode before letting go.
 *
 * NOTE(review): this appears to be an alternate revision of
 * hammer2_inode_unlock_ex (it passes ip->hmp to hammer2_chain_unlock,
 * a two-argument form) — confirm which revision is live in this tree.
 *
 * XXX this will catch parent directories too which we don't
 * really want.
 */
void
hammer2_inode_unlock_ex(hammer2_inode_t *ip, hammer2_chain_t *chain)
{
	/*
	 * If the backing chain has pending modifications, flag the inode
	 * as modified before the lock is dropped so the flusher picks
	 * it up.
	 */
	if (ip->chain && (ip->chain->flags & (HAMMER2_CHAIN_MODIFIED |
					      HAMMER2_CHAIN_SUBMODIFIED))) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	}
	if (chain)
		hammer2_chain_unlock(ip->hmp, chain);
	ccms_thread_unlock(&ip->topo_cst);
	hammer2_inode_drop(ip);
}
/*
 * The passed-in chain must be locked and the returned inode will also be
 * locked.  This routine typically locates or allocates the inode, assigns
 * ip->chain (adding a ref to chain if necessary), and returns the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * WARNING! This routine sucks up the chain's lock (makes it part of the
 *	    inode lock from the point of view of the inode lock API),
 *	    so callers need to be careful.
 *
 * WARNING! The mount code is allowed to pass dip == NULL for iroot and
 *	    is allowed to pass pmp == NULL and dip == NULL for sroot.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfsmount_t *pmp, hammer2_inode_t *dip,
		  hammer2_chain_t *chain)
{
	hammer2_inode_t *nip;

	KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 */
again:
	for (;;) {
		nip = hammer2_inode_lookup(pmp, chain->data->ipdata.inum);
		if (nip == NULL)
			break;
		ccms_thread_lock(&nip->topo_cst, CCMS_STATE_EXCLUSIVE);
		if ((nip->flags & HAMMER2_INODE_ONRBTREE) == 0) { /* race */
			/* lost a removal race, retry the lookup */
			ccms_thread_unlock(&nip->topo_cst);
			hammer2_inode_drop(nip);
			continue;
		}
		if (nip->chain != chain)
			hammer2_inode_repoint(nip, NULL, chain);

		/*
		 * Consolidated nip/nip->chain is locked (chain locked
		 * by caller).
		 */
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode.
	 * pmp == NULL indicates the super-root (sroot) special case.
	 */
	if (pmp) {
		nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
		atomic_add_long(&pmp->inmem_inodes, 1);
		hammer2_chain_memory_inc(pmp);
		hammer2_chain_memory_wakeup(pmp);
	} else {
		nip = kmalloc(sizeof(*nip), M_HAMMER2, M_WAITOK | M_ZERO);
		nip->flags = HAMMER2_INODE_SROOT;
	}
	/* seed cached metadata from the chain's inode data */
	nip->inum = chain->data->ipdata.inum;
	nip->size = chain->data->ipdata.size;
	nip->mtime = chain->data->ipdata.mtime;
	hammer2_inode_repoint(nip, NULL, chain);
	nip->pip = dip;				/* can be NULL */
	if (dip)
		hammer2_inode_ref(dip);	/* ref dip for nip->pip */
	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock_ex() call.
	 */
	nip->refs = 1;
	ccms_cst_init(&nip->topo_cst, &nip->chain);
	ccms_thread_lock(&nip->topo_cst, CCMS_STATE_EXCLUSIVE);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp) {
		spin_lock(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			spin_unlock(&pmp->inum_spin);
			ccms_thread_unlock(&nip->topo_cst);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		spin_unlock(&pmp->inum_spin);
	}
	return (nip);
}
/*
 * Given an exclusively locked inode and chain we consolidate its chain
 * for hardlink creation, adding (nlinks) to the file's link count and
 * potentially relocating the inode to a directory common to ip->pip and
 * tdip.
 *
 * Replaces (*chainp) if consolidation occurred, unlocking the old chain
 * and returning a new locked chain.
 *
 * NOTE!  This function will also replace ip->chain.
 */
int
hammer2_hardlink_consolidate(hammer2_trans_t *trans, hammer2_inode_t *ip,
			     hammer2_chain_t **chainp,
			     hammer2_inode_t *cdip, hammer2_chain_t **cdchainp,
			     int nlinks)
{
	hammer2_inode_data_t *ipdata;
	hammer2_chain_t *chain;
	hammer2_chain_t *nchain;
	int error;

	chain = *chainp;
	if (nlinks == 0 &&			/* no hardlink needed */
	    (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		return (0);
	}
	if (hammer2_hardlink_enable < 0) {	/* fake hardlinks */
		return (0);
	}
	if (hammer2_hardlink_enable == 0) {	/* disallow hardlinks */
		hammer2_chain_unlock(chain);
		*chainp = NULL;
		return (ENOTSUP);
	}

	/*
	 * If no change in the hardlink's target directory is required and
	 * this is already a hardlink target, all we need to do is adjust
	 * the link count.
	 */
	if (cdip == ip->pip &&
	    (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
		if (nlinks) {
			hammer2_chain_modify(trans, &chain, 0);
			chain->data->ipdata.nlinks += nlinks;
		}
		error = 0;
		goto done;
	}

	/*
	 * chain is the real inode.  If it's visible we have to convert it
	 * to a hardlink pointer.  If it is not visible then it is already
	 * a hardlink target and only needs to be deleted.
	 */
	KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
	KKASSERT(chain->data->ipdata.type != HAMMER2_OBJTYPE_HARDLINK);
	if (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE) {
		/*
		 * We are going to duplicate chain later, causing its
		 * media block to be shifted to the duplicate.  Even though
		 * we are delete-duplicating nchain here it might decide not
		 * to reallocate the block.  Set FORCECOW to force it to.
		 */
		nchain = chain;
		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		atomic_set_int(&nchain->flags, HAMMER2_CHAIN_FORCECOW);
		hammer2_chain_delete_duplicate(trans, &nchain,
					       HAMMER2_DELDUP_RECORE);
		KKASSERT((chain->flags & HAMMER2_CHAIN_DUPLICATED) == 0);

		/*
		 * Turn the recored duplicate into an OBJTYPE_HARDLINK
		 * pointer: remember the original type in target_type and
		 * wipe all file metadata that no longer applies to the
		 * pointer entry.
		 */
		ipdata = &nchain->data->ipdata;
		ipdata->target_type = ipdata->type;
		ipdata->type = HAMMER2_OBJTYPE_HARDLINK;
		ipdata->uflags = 0;
		ipdata->rmajor = 0;
		ipdata->rminor = 0;
		ipdata->ctime = 0;
		ipdata->mtime = 0;
		ipdata->atime = 0;
		ipdata->btime = 0;
		bzero(&ipdata->uid, sizeof(ipdata->uid));
		bzero(&ipdata->gid, sizeof(ipdata->gid));
		ipdata->op_flags = HAMMER2_OPFLAG_DIRECTDATA;
		ipdata->cap_flags = 0;
		ipdata->mode = 0;
		ipdata->size = 0;
		ipdata->nlinks = 1;
		ipdata->iparent = 0;	/* XXX */
		ipdata->pfs_type = 0;
		ipdata->pfs_inum = 0;
		bzero(&ipdata->pfs_clid, sizeof(ipdata->pfs_clid));
		bzero(&ipdata->pfs_fsid, sizeof(ipdata->pfs_fsid));
		ipdata->data_quota = 0;
		ipdata->data_count = 0;
		ipdata->inode_quota = 0;
		ipdata->inode_count = 0;
		ipdata->attr_tid = 0;
		ipdata->dirent_tid = 0;
		bzero(&ipdata->u, sizeof(ipdata->u));
		/* XXX transaction ids */
	} else {
		hammer2_chain_delete(trans, chain, 0);
		nchain = NULL;
	}

	/*
	 * chain represents the hardlink target and is now flagged deleted.
	 * duplicate it to the parent directory and adjust nlinks.
	 *
	 * WARNING! The shiftup() call can cause nchain to be moved into
	 *	    an indirect block, and our nchain will wind up pointing
	 *	    to the older/original version.
	 */
	KKASSERT(chain->flags & HAMMER2_CHAIN_DELETED);
	hammer2_hardlink_shiftup(trans, &chain, cdip, cdchainp, nlinks,
				 &error);

	if (error == 0)
		hammer2_inode_repoint(ip, cdip, chain);

	/*
	 * Unlock the original chain last as the lock blocked races against
	 * the creation of the new hardlink target.
	 */
	if (nchain)
		hammer2_chain_unlock(nchain);

done:
	/*
	 * Cleanup, chain/nchain already dealt with.
	 */
	*chainp = chain;
	hammer2_inode_drop(cdip);

	return (error);
}
/*
 * Create a new PFS under the super-root.
 *
 * Validates the requested name, refuses duplicates, then creates a
 * directory inode flagged as a PFS root inside the super-root within a
 * transaction and copies the PFS identity fields from the ioctl request.
 *
 * Returns 0 on success or an errno (EINVAL, EEXIST, or an error from
 * inode creation).
 */
static int
hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
{
	hammer2_inode_data_t *nipdata;
	hammer2_chain_t *nchain;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_inode_t *nip;
	hammer2_tid_t mtid;
	int error;

	hmp = ip->pmp->pfs_hmps[0];	/* operate on the first device */
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	nip = NULL;

	if (pfs->name[0] == 0)
		return(EINVAL);
	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure 0-termination */

	if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
		return(EEXIST);

	hammer2_trans_init(hmp->spmp, 0);
	mtid = hammer2_trans_sub(hmp->spmp);
	nip = hammer2_inode_create(hmp->spmp->iroot, NULL, NULL,
				   pfs->name, strlen(pfs->name), 0,
				   1, HAMMER2_OBJTYPE_DIRECTORY, 0,
				   HAMMER2_INSERT_PFSROOT, &error);
	if (error == 0) {
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_modify(nchain, mtid, 0);
		/* nipdata only consumed by the #if 0 rescan block below */
		nipdata = &nchain->data->ipdata;

		/* copy PFS identity from the ioctl request */
		nip->meta.pfs_type = pfs->pfs_type;
		nip->meta.pfs_subtype = pfs->pfs_subtype;
		nip->meta.pfs_clid = pfs->pfs_clid;
		nip->meta.pfs_fsid = pfs->pfs_fsid;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;

		/*
		 * Set default compression and check algorithm.  This
		 * can be changed later.
		 *
		 * Do not allow compression on PFS's with the special name
		 * "boot", the boot loader can't decompress (yet).
		 */
		nip->meta.comp_algo =
			HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
		nip->meta.check_algo =
			HAMMER2_ENC_ALGO( HAMMER2_CHECK_ISCSI32);

		if (strcasecmp(pfs->name, "boot") == 0) {
			nip->meta.comp_algo =
				HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
		}

#if 0
		hammer2_blockref_t bref;
		/* XXX new PFS needs to be rescanned / added */
		bref = nchain->bref;
		kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
		hammer2_pfsalloc(nchain, nipdata, bref.modify_tid);
#endif
		/* XXX rescan */
		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);

		/*
		 * Super-root isn't mounted, fsync it
		 */
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_fsync(nip);
		hammer2_inode_drop(nip);
	}
	hammer2_trans_done(hmp->spmp);

	return (error);
}
/*
 * Primary management thread for an element of a node.  A thread will exist
 * for each element requiring management.
 *
 * No management threads are needed for the SPMP or for any PMP with only
 * a single MASTER.
 *
 * On the SPMP - handles bulkfree and dedup operations
 * On a PFS    - handles remastering and synchronization
 */
void
hammer2_primary_sync_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_pfs_t *pmp;
	hammer2_deferred_list_t list;	/* inodes deferred by a sync pass */
	hammer2_deferred_ip_t *defer;
	int error;

	pmp = thr->pmp;
	bzero(&list, sizeof(list));

	lockmgr(&thr->lk, LK_EXCLUSIVE);
	while ((thr->flags & HAMMER2_THREAD_STOP) == 0) {
		/*
		 * Handle freeze request
		 */
		if (thr->flags & HAMMER2_THREAD_FREEZE) {
			atomic_set_int(&thr->flags, HAMMER2_THREAD_FROZEN);
			atomic_clear_int(&thr->flags, HAMMER2_THREAD_FREEZE);
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (thr->flags & HAMMER2_THREAD_FROZEN) {
			lksleep(&thr->flags, &thr->lk, 0, "frozen", 0);
			continue;
		}

		/*
		 * Reset state on REMASTER request
		 */
		if (thr->flags & HAMMER2_THREAD_REMASTER) {
			atomic_clear_int(&thr->flags, HAMMER2_THREAD_REMASTER);
			/* reset state */
		}

		/*
		 * Synchronization scan.
		 */
		kprintf("sync_slaves pfs %s clindex %d\n",
			pmp->pfs_names[thr->clindex], thr->clindex);
		hammer2_trans_init(pmp, 0);

		hammer2_inode_ref(pmp->iroot);

		for (;;) {
			int didbreak = 0;
			/* XXX lock synchronize pmp->modify_tid */
			error = hammer2_sync_slaves(thr, pmp->iroot, &list);
			if (error != EAGAIN)
				break;

			/*
			 * EAGAIN from the root scan means deferred inodes
			 * were queued; retry each one.
			 */
			while ((defer = list.base) != NULL) {
				hammer2_inode_t *nip;

				nip = defer->ip;
				error = hammer2_sync_slaves(thr, nip, &list);
				if (error && error != EAGAIN)
					break;
				if (hammer2_thr_break(thr)) {
					didbreak = 1;
					break;
				}

				/*
				 * If no additional defers occurred we can
				 * remove this one, otherwise keep it on
				 * the list and retry once the additional
				 * defers have completed.
				 */
				if (defer == list.base) {
					--list.count;
					list.base = defer->next;
					kfree(defer, M_HAMMER2);
					defer = NULL;	/* safety */
					hammer2_inode_drop(nip);
				}
			}

			/*
			 * If the thread is being remastered, frozen, or
			 * stopped, clean up any left-over deferrals.
			 */
			if (didbreak || (error && error != EAGAIN)) {
				kprintf("didbreak\n");
				while ((defer = list.base) != NULL) {
					--list.count;
					hammer2_inode_drop(defer->ip);
					list.base = defer->next;
					kfree(defer, M_HAMMER2);
				}
				if (error == 0 || error == EAGAIN)
					error = EINPROGRESS;
				break;
			}
		}

		hammer2_inode_drop(pmp->iroot);
		hammer2_trans_done(pmp);

		if (error)
			kprintf("hammer2_sync_slaves: error %d\n", error);

		/*
		 * Wait for event, or 5-second poll.
		 */
		lksleep(&thr->flags, &thr->lk, 0, "h2idle", hz * 5);
	}
	thr->td = NULL;
	wakeup(thr);
	lockmgr(&thr->lk, LK_RELEASE);
	/* thr structure can go invalid after this point */
}
/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 *
 * Decrements the run mask by the caller's mask bits.  Only the last
 * retiring participant performs the full teardown: releasing collected
 * chains, draining per-cluster fifos, dropping held inodes, freeing
 * name buffers, and returning the xop to its objcache.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask)
{
	hammer2_xop_group_t *xgrp;	/* NOTE: not otherwise used below */
	hammer2_chain_t *chain;
	int i;

	xgrp = xop->xgrp;

	/*
	 * Remove the frontend or remove a backend feeder.  When removing
	 * the frontend we must wakeup any backend feeders who are waiting
	 * for FIFO space.
	 *
	 * XXX optimize wakeup.
	 */
	KKASSERT(xop->run_mask & mask);
	if (atomic_fetchadd_int(&xop->run_mask, -mask) != mask) {
		/* not the last participant; just wake waiters if frontend */
		if (mask == HAMMER2_XOPMASK_VOP)
			wakeup(xop);
		return;
	}

	/*
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}

	/*
	 * Cleanup the fifos, using chk_mask to skip clusters that never
	 * queued anything.
	 */
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain) {
				hammer2_chain_unlock(chain);
				hammer2_chain_drop(chain);
			}
			++fifo->ri;
			if (fifo->wi - fifo->ri < HAMMER2_XOPFIFO / 2)
				wakeup(xop);	/* XXX optimize */
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip) {
		hammer2_inode_drop(xop->ip);
		xop->ip = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name) {
		kfree(xop->name, M_HAMMER2);
		xop->name = NULL;
		xop->name_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}
/*
 * Given an exclusively locked inode we consolidate its chain for hardlink
 * creation, adding (nlinks) to the file's link count and potentially
 * relocating the inode to a directory common to ip->pip and tdip.
 *
 * Replaces (*chainp) if consolidation occurred, unlocking the old chain
 * and returning a new locked chain.
 *
 * NOTE!  This function will also replace ip->chain.
 *
 * NOTE(review): an alternate revision of this function (taking an
 * explicit cdip/cdchainp) also appears in this file — confirm which is
 * live in this tree.
 */
int
hammer2_hardlink_consolidate(hammer2_trans_t *trans, hammer2_inode_t *ip,
			     hammer2_chain_t **chainp,
			     hammer2_inode_t *tdip, int nlinks)
{
	hammer2_inode_data_t *ipdata;
	hammer2_inode_t *fdip;
	hammer2_inode_t *cdip;
	hammer2_chain_t *chain;
	hammer2_chain_t *nchain;
	int error;

	chain = *chainp;
	if (nlinks == 0 &&			/* no hardlink needed */
	    (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		return (0);
	}
	if (hammer2_hardlink_enable < 0) {	/* fake hardlinks */
		return (0);
	}
	if (hammer2_hardlink_enable == 0) {	/* disallow hardlinks */
		hammer2_chain_unlock(chain);
		*chainp = NULL;
		return (ENOTSUP);
	}

	/*
	 * cdip will be returned with a ref, but not locked.
	 */
	fdip = ip->pip;
	cdip = hammer2_inode_common_parent(fdip, tdip);

	/*
	 * If no change in the hardlink's target directory is required and
	 * this is already a hardlink target, all we need to do is adjust
	 * the link count.
	 *
	 * XXX The common parent is a big wiggly due to duplication from
	 *     renames.  Compare the core (RBTREE) pointer instead of the
	 *     ip's.
	 */
	if (cdip == fdip &&
	    (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
		if (nlinks) {
			hammer2_chain_modify(trans, &chain, 0);
			chain->data->ipdata.nlinks += nlinks;
		}
		error = 0;
		goto done;
	}

	/*
	 * We either have to move an existing hardlink target or we have
	 * to create a fresh hardlink target.
	 *
	 * Hardlink targets are hidden inodes in a parent directory common
	 * to all directory entries referencing the hardlink.
	 */
	nchain = hammer2_hardlink_shiftup(trans, &chain, cdip, &error);

	if (error == 0) {
		/*
		 * Bump nlinks on duplicated hidden inode, repoint
		 * ip->chain.
		 */
		hammer2_chain_modify(trans, &nchain, 0);
		nchain->data->ipdata.nlinks += nlinks;
		hammer2_inode_repoint(ip, cdip, nchain);

		/*
		 * If the old chain is not a hardlink target then replace
		 * it with a OBJTYPE_HARDLINK pointer.
		 *
		 * If the old chain IS a hardlink target then delete it.
		 */
		if (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE) {
			/*
			 * Replace original non-hardlink that's been dup'd
			 * with a special hardlink directory entry.  We must
			 * set the DIRECTDATA flag to prevent sub-chains
			 * from trying to synchronize to the inode if the
			 * file is extended afterwords.
			 */
			hammer2_chain_modify(trans, &chain, 0);
			hammer2_chain_delete_duplicate(trans, &chain,
						       HAMMER2_DELDUP_RECORE);
			ipdata = &chain->data->ipdata;
			ipdata->target_type = ipdata->type;
			ipdata->type = HAMMER2_OBJTYPE_HARDLINK;
			ipdata->uflags = 0;
			ipdata->rmajor = 0;
			ipdata->rminor = 0;
			ipdata->ctime = 0;
			ipdata->mtime = 0;
			ipdata->atime = 0;
			ipdata->btime = 0;
			bzero(&ipdata->uid, sizeof(ipdata->uid));
			bzero(&ipdata->gid, sizeof(ipdata->gid));
			ipdata->op_flags = HAMMER2_OPFLAG_DIRECTDATA;
			ipdata->cap_flags = 0;
			ipdata->mode = 0;
			ipdata->size = 0;
			ipdata->nlinks = 1;
			ipdata->iparent = 0;	/* XXX */
			ipdata->pfs_type = 0;
			ipdata->pfs_inum = 0;
			bzero(&ipdata->pfs_clid, sizeof(ipdata->pfs_clid));
			bzero(&ipdata->pfs_fsid, sizeof(ipdata->pfs_fsid));
			ipdata->data_quota = 0;
			ipdata->data_count = 0;
			ipdata->inode_quota = 0;
			ipdata->inode_count = 0;
			ipdata->attr_tid = 0;
			ipdata->dirent_tid = 0;
			bzero(&ipdata->u, sizeof(ipdata->u));
			/* XXX transaction ids */
		} else {
			hammer2_chain_delete(trans, chain);
		}

		/*
		 * Return the new chain.
		 */
		hammer2_chain_unlock(chain);
		chain = nchain;
	} else {
		/*
		 * Return an error
		 */
		hammer2_chain_unlock(chain);
		chain = NULL;
	}

	/*
	 * Cleanup, chain/nchain already dealt with.
	 */
done:
	*chainp = chain;
	hammer2_inode_drop(cdip);

	return (error);
}
/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 *
	 * When an xop is supplied the authoritative inum comes from its
	 * inode data; the inum argument is ignored.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, NULL,
						      &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, &xop->cluster);
	} else {
		nip->meta.inum = inum;	/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);	/*XXX*/
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}