/*
 * Set various parameters in an inode which cannot be set through
 * normal filesystem VNOPS.
 *
 * Each HAMMER2IOC_INODE_FLAG_* bit in the request selects one field to
 * copy from the ioctl's ip_data into the live inode meta-data.  The
 * inode is dirtied (hammer2_inode_modify()) only when a selected field
 * actually changes value, avoiding unnecessary media updates.
 *
 * All updates occur inside a transaction with the inode held locked.
 */
static int
hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_inode_t *ioc = data;

	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);

	/* Check-code algorithm */
	if ((ioc->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
	    ioc->ip_data.meta.check_algo != ip->meta.check_algo) {
		hammer2_inode_modify(ip);
		ip->meta.check_algo = ioc->ip_data.meta.check_algo;
	}

	/* Compression algorithm */
	if ((ioc->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
	    ioc->ip_data.meta.comp_algo != ip->meta.comp_algo) {
		hammer2_inode_modify(ip);
		ip->meta.comp_algo = ioc->ip_data.meta.comp_algo;
	}

	/* Hand the kernel inode pointer back through the ioctl structure */
	ioc->kdata = ip;

	/* Ignore these flags for now...*/

	/* Inode-count quota */
	if ((ioc->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
	    ioc->ip_data.meta.inode_quota != ip->meta.inode_quota) {
		hammer2_inode_modify(ip);
		ip->meta.inode_quota = ioc->ip_data.meta.inode_quota;
	}

	/* Data-byte quota */
	if ((ioc->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
	    ioc->ip_data.meta.data_quota != ip->meta.data_quota) {
		hammer2_inode_modify(ip);
		ip->meta.data_quota = ioc->ip_data.meta.data_quota;
	}

	/* Number of copies */
	if ((ioc->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
	    ioc->ip_data.meta.ncopies != ip->meta.ncopies) {
		hammer2_inode_modify(ip);
		ip->meta.ncopies = ioc->ip_data.meta.ncopies;
	}

	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	/* This path currently has no failure modes */
	return (0);
}
/*
 * Create a new PFS under the super-root.
 *
 * (ip) is any inode belonging to the mount issuing the ioctl; it is used
 * to reach the first hmp and the super-root PFS.  (data) is a
 * hammer2_ioc_pfs_t supplying the new PFS's name, type, subtype, and
 * cluster/filesystem ids.
 *
 * Fails with EINVAL if no device or an empty name is given, EEXIST if a
 * PFS with that name already exists.  The new PFS root inode is created
 * as a directory in the super-root within its own transaction and then
 * fsynced explicitly because the super-root is not mounted.
 */
static int
hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
{
	hammer2_inode_data_t *nipdata;
	hammer2_chain_t *nchain;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_inode_t *nip;
	hammer2_tid_t mtid;
	int error;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	nip = NULL;

	if (pfs->name[0] == 0)
		return(EINVAL);
	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure 0-termination */

	/* Refuse duplicate PFS names */
	if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
		return(EEXIST);

	/*
	 * The new PFS root is created in the super-root's PFS, so the
	 * transaction runs against hmp->spmp rather than the caller's pmp.
	 */
	hammer2_trans_init(hmp->spmp, 0);
	mtid = hammer2_trans_sub(hmp->spmp);
	nip = hammer2_inode_create(hmp->spmp->iroot, NULL, NULL,
				   pfs->name, strlen(pfs->name), 0,
				   1, HAMMER2_OBJTYPE_DIRECTORY, 0,
				   HAMMER2_INSERT_PFSROOT, &error);
	if (error == 0) {
		/*
		 * Dirty both the frontend inode and its backing chain, then
		 * copy the caller-supplied PFS identity into the meta-data
		 * and mark the inode as a PFS root.
		 */
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_modify(nchain, mtid, 0);
		nipdata = &nchain->data->ipdata;

		nip->meta.pfs_type = pfs->pfs_type;
		nip->meta.pfs_subtype = pfs->pfs_subtype;
		nip->meta.pfs_clid = pfs->pfs_clid;
		nip->meta.pfs_fsid = pfs->pfs_fsid;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;

		/*
		 * Set default compression and check algorithm.  This
		 * can be changed later.
		 *
		 * Do not allow compression on PFS's with the special name
		 * "boot", the boot loader can't decompress (yet).
		 */
		nip->meta.comp_algo =
			HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
		nip->meta.check_algo =
			HAMMER2_ENC_ALGO(HAMMER2_CHECK_ISCSI32);

		if (strcasecmp(pfs->name, "boot") == 0) {
			nip->meta.comp_algo =
				HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
		}

#if 0
		hammer2_blockref_t bref;
		/* XXX new PFS needs to be rescanned / added */
		bref = nchain->bref;
		kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
		hammer2_pfsalloc(nchain, nipdata, bref.modify_tid);
#endif
		/* XXX rescan */
		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);

		/*
		 * Super-root isn't mounted, fsync it
		 */
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_fsync(nip);
		hammer2_inode_drop(nip);
	}
	hammer2_trans_done(hmp->spmp);

	return (error);
}
/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		/*
		 * Scan existing keys starting at the hash; bump lhc past
		 * every key already in use until a gap (break) or the end
		 * of the scan (ENOENT) is reached.
		 */
		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			/* ENOENT just means the scan ran out of entries */
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		/*
		 * If the collision iteration walked out of the hash's
		 * low-bits collision space the namespace is full.
		 */
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	/* Translate HAMMER2_ERROR_* to a UNIX errno for the caller */
	error = hammer2_error_to_errno(error);

	return error;
}
/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	hammer2_pfs_t *pmp;
	int error;

	pmp = ip->pmp;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open we can just delete the inode and not bother dropping
	 * nlinks to 0 (avoiding unnecessary block updates).
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			goto killit;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	/* Let kqueue watchers know the file is going away */
	if (ip->vp)
		hammer2_knote(ip->vp, NOTE_DELETE);

	/*
	 * nlinks is now an implied zero, delete the inode if not open.
	 * We avoid unnecessary media updates by not bothering to actually
	 * decrement nlinks for the 1->0 transition
	 *
	 * Put the inode on the sideq to ensure that any disconnected chains
	 * get properly flushed (so they can be freed).  Defer the deletion
	 * to the sync code, doing it now will desynchronize the inode from
	 * related directory entries (which is bad).
	 *
	 * NOTE: killit can be reached without modifying the inode, so
	 *	 make sure that it is on the SIDEQ.
	 */
	if (isopen == 0) {
#if 0
		hammer2_xop_destroy_t *xop;
#endif

killit:
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);
#if 0
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif
	}
	error = 0;	/* XXX */

	return error;
}
/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 *
 * (pip) is the directory the create is occurring in; its uid/gid/mode and
 * comp/check algorithms are inherited.  (vap) supplies the new inode's
 * type, mode, and device numbers; (inum) is the pre-assigned inode number
 * used as the media key.
 *
 * Returns the new in-memory inode.  NOTE: on failure *errorp is set to a
 * non-zero HAMMER2 error code and the (chain-less) inode is still
 * returned -- callers must check *errorp, not just the return value.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	uint8_t type;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	/*
	 * Snapshot the fields inherited from the parent directory up
	 * front.  The PFS root directory itself reports inum 1.
	 */
	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	/* Device nodes carry their major/minor numbers in the meta-data */
	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	type = nip->meta.type;

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	/*
	 * Resolve ownership: explicit uuid/uid/gid from the vattr wins,
	 * otherwise fall back to the uid computed by the VOP helper and
	 * the parent directory's gid.
	 */
	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	/* The inode's media name is its inode number rendered as a name */
	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.  We are
	 * not in a flush transaction so we can't mess with media topology
	 * above normal inodes (i.e. the index of the inodes themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, NULL, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}