int
_kvm_stat_ntfs(kvm_t *kd, struct kinfo_file *kf, struct vnode *vp)
{
	struct ntnode ntnode;
	struct fnode fn;
	struct ntfsmount ntm;

	/*
	 * To get the ntnode we have to go in two steps: first read the
	 * appropriate struct fnode, then take the address of the ntnode
	 * from it and read its contents.
	 */
	if (KREAD(kd, (u_long)VTOF(vp), &fn)) {
		_kvm_err(kd, kd->program, "can't read fnode at %p", VTOF(vp));
		return (-1);
	}
	if (KREAD(kd, (u_long)FTONT(&fn), &ntnode)) {
		_kvm_err(kd, kd->program, "can't read ntnode at %p", FTONT(&fn));
		return (-1);
	}
	if (KREAD(kd, (u_long)ntnode.i_mp, &ntm)) {
		_kvm_err(kd, kd->program, "can't read ntfsmount at %p",
		    ntnode.i_mp);
		return (-1);
	}
	kf->va_fsid = ntnode.i_dev & 0xffff;
	kf->va_fileid = (long)ntnode.i_number;
	kf->va_mode = (mode_t)ntm.ntm_mode | _kvm_getftype(vp->v_type);
	kf->va_size = fn.f_size;
	kf->va_rdev = 0;	/* XXX */

	return (0);
}
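/*
 * Illustrative userland sketch (not from the original source) of the same
 * two-step pointer chase done with libkvm: read one structure out of kernel
 * memory, then follow the pointer embedded in it.  "struct foo"/"struct bar"
 * and read_chain() are made-up names; kvm_read() is the real libkvm call.
 */
#include <kvm.h>
#include <stdio.h>

struct bar { int value; };
struct foo { struct bar *barp; };

static int
read_chain(kvm_t *kd, unsigned long addr)
{
	struct foo foo;
	struct bar bar;

	/* Step 1: copy the first structure out of kernel memory. */
	if (kvm_read(kd, addr, &foo, sizeof(foo)) != sizeof(foo))
		return (-1);
	/* Step 2: follow the embedded pointer and copy the second one. */
	if (kvm_read(kd, (unsigned long)foo.barp, &bar, sizeof(bar)) != sizeof(bar))
		return (-1);
	printf("value: %d\n", bar.value);
	return (0);
}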
/* * Reclaim an fnode/ntnode so that it can be used for other purposes. */ int ntfs_reclaim(void *v) { struct vop_reclaim_args *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct proc *p = ap->a_p; int error; dprintf(("ntfs_reclaim: vnode: %p, ntnode: %d\n", vp, ip->i_number)); #ifdef DIAGNOSTIC if (ntfs_prtactive && vp->v_usecount != 0) vprint("ntfs_reclaim: pushing active", vp); #endif if ((error = ntfs_ntget(ip, p)) != 0) return (error); /* Purge old data structures associated with the inode. */ cache_purge(vp); ntfs_frele(fp); ntfs_ntput(ip, p); vp->v_data = NULL; return (0); }
static int ntfs_write(void *v) { struct vop_write_args *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct uio *uio = ap->a_uio; struct ntfsmount *ntmp = ip->i_mp; u_int64_t towrite; size_t written; int error; dprintf(("ntfs_write: ino: %d, off: %d resid: %d, segflg: %d\n",ip->i_number,(u_int32_t)uio->uio_offset,uio->uio_resid,uio->uio_segflg)); dprintf(("ntfs_write: filesize: %d",(u_int32_t)fp->f_size)); if (uio->uio_resid + uio->uio_offset > fp->f_size) { printf("ntfs_write: CAN'T WRITE BEYOND END OF FILE\n"); return (EFBIG); } towrite = MIN(uio->uio_resid, fp->f_size - uio->uio_offset); dprintf((", towrite: %d\n",(u_int32_t)towrite)); error = ntfs_writeattr_plain(ntmp, ip, fp->f_attrtype, fp->f_attrname, uio->uio_offset, towrite, NULL, &written, uio); #ifdef NTFS_DEBUG if (error) printf("ntfs_write: ntfs_writeattr failed: %d\n", error); #endif return (error); }
static int ntfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) { struct ntfsmount *ntmp = VFSTONTFS(mp); u_int64_t mftallocated; dprintf(("ntfs_statfs():\n")); mftallocated = VTOF(ntmp->ntm_sysvn[NTFS_MFTINO])->f_allocated; sbp->f_type = mp->mnt_vfc->vfc_typenum; sbp->f_bsize = ntmp->ntm_bps; sbp->f_iosize = ntmp->ntm_bps * ntmp->ntm_spc; sbp->f_blocks = ntmp->ntm_bootfile.bf_spv; sbp->f_bfree = sbp->f_bavail = ntfs_cntobn(ntmp->ntm_cfree); sbp->f_ffree = sbp->f_bfree / ntmp->ntm_bpmftrec; sbp->f_files = mftallocated / ntfs_bntob(ntmp->ntm_bpmftrec) + sbp->f_ffree; if (sbp != &mp->mnt_stat) { bcopy((caddr_t)mp->mnt_stat.f_mntfromname, (caddr_t)&sbp->f_mntfromname[0], MNAMELEN); } sbp->f_flags = mp->mnt_flag; return (0); }
static int ntfs_getattr(void *v) { struct vop_getattr_args /* { struct vnode *a_vp; struct vattr *a_vap; kauth_cred_t a_cred; } */ *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct vattr *vap = ap->a_vap; dprintf(("ntfs_getattr: %llu, flags: %d\n", (unsigned long long)ip->i_number, ip->i_flag)); vap->va_fsid = ip->i_dev; vap->va_fileid = ip->i_number; vap->va_mode = ip->i_mp->ntm_mode; vap->va_nlink = ip->i_nlink; vap->va_uid = ip->i_mp->ntm_uid; vap->va_gid = ip->i_mp->ntm_gid; vap->va_rdev = 0; /* XXX UNODEV ? */ vap->va_size = fp->f_size; vap->va_bytes = fp->f_allocated; vap->va_atime = ntfs_nttimetounix(fp->f_times.t_access); vap->va_mtime = ntfs_nttimetounix(fp->f_times.t_write); vap->va_ctime = ntfs_nttimetounix(fp->f_times.t_create); vap->va_flags = ip->i_flag; vap->va_gen = 0; vap->va_blocksize = ip->i_mp->ntm_spc * ip->i_mp->ntm_bps; vap->va_type = vp->v_type; vap->va_filerev = 0; return (0); }
/* * Reclaim an fnode/ntnode so that it can be used for other purposes. */ int ntfs_reclaim(void *v) { struct vop_reclaim_args /* { struct vnode *a_vp; } */ *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); int error; dprintf(("ntfs_reclaim: vnode: %p, ntnode: %llu\n", vp, (unsigned long long)ip->i_number)); if (prtactive && vp->v_usecount > 1) vprint("ntfs_reclaim: pushing active", vp); if ((error = ntfs_ntget(ip)) != 0) return (error); if (ip->i_devvp) { vrele(ip->i_devvp); ip->i_devvp = NULL; } genfs_node_destroy(vp); ntfs_frele(fp); ntfs_ntput(ip); vp->v_data = NULL; return (0); }
static int
ntfs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	struct ntnode *ntp;
	struct ntfid ntfh;
	struct fnode *fn;

	if (*fh_size < sizeof(struct ntfid)) {
		*fh_size = sizeof(struct ntfid);
		return E2BIG;
	}
	*fh_size = sizeof(struct ntfid);

	ddprintf(("ntfs_vptofh(): %s: %p\n",
	    vp->v_mount->mnt_stat.f_mntonname, vp));

	fn = VTOF(vp);
	ntp = VTONT(vp);
	memset(&ntfh, 0, sizeof(ntfh));
	ntfh.ntfid_len = sizeof(struct ntfid);
	ntfh.ntfid_ino = ntp->i_number;
	ntfh.ntfid_attr = fn->f_attrtype;
#ifdef notyet
	ntfh.ntfid_gen = ntp->i_gen;
#endif
	memcpy(fhp, &ntfh, sizeof(ntfh));
	return (0);
}
static int ntfs_statvfs( struct mount *mp, struct statvfs *sbp) { struct ntfsmount *ntmp = VFSTONTFS(mp); u_int64_t mftallocated; dprintf(("ntfs_statvfs():\n")); mftallocated = VTOF(ntmp->ntm_sysvn[NTFS_MFTINO])->f_allocated; sbp->f_bsize = ntmp->ntm_bps; sbp->f_frsize = sbp->f_bsize; /* XXX */ sbp->f_iosize = ntmp->ntm_bps * ntmp->ntm_spc; sbp->f_blocks = ntmp->ntm_bootfile.bf_spv; sbp->f_bfree = sbp->f_bavail = ntfs_cntobn(ntmp->ntm_cfree); sbp->f_ffree = sbp->f_favail = sbp->f_bfree / ntmp->ntm_bpmftrec; sbp->f_files = mftallocated / ntfs_bntob(ntmp->ntm_bpmftrec) + sbp->f_ffree; sbp->f_fresvd = sbp->f_bresvd = 0; /* XXX */ sbp->f_flag = mp->mnt_flag; copy_statvfs_info(sbp, mp); return (0); }
int ntfs_calccfree(struct ntfsmount *ntmp, cn_t *cfreep) { struct vnode *vp; u_int8_t *tmp; int j, error; long cfree = 0; size_t bmsize, i; vp = ntmp->ntm_sysvn[NTFS_BITMAPINO]; bmsize = VTOF(vp)->f_size; tmp = kmalloc(bmsize, M_TEMP, M_WAITOK); error = ntfs_readattr(ntmp, VTONT(vp), NTFS_A_DATA, NULL, 0, bmsize, tmp, NULL); if (error) goto out; for(i=0;i<bmsize;i++) for(j=0;j<8;j++) if(~tmp[i] & (1 << j)) cfree++; *cfreep = cfree; out: kfree(tmp, M_TEMP); return(error); }
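/*
 * Standalone illustration (added for clarity, not part of the driver) of the
 * counting loop above: every zero bit in the $Bitmap data corresponds to one
 * free cluster.  The bitmap bytes below are invented sample data.
 */
#include <stdio.h>

static long
count_zero_bits(const unsigned char *bm, size_t len)
{
	long cfree = 0;
	size_t i;
	int j;

	for (i = 0; i < len; i++)
		for (j = 0; j < 8; j++)
			if (~bm[i] & (1 << j))
				cfree++;
	return (cfree);
}

int
main(void)
{
	unsigned char bitmap[] = { 0xff, 0x0f, 0x00 };	/* 0 + 4 + 8 zero bits */

	printf("%ld free clusters\n", count_zero_bits(bitmap, sizeof(bitmap)));
	return (0);
}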
/*
 * This function reports the percentage of free nodes vs. total nodes for
 * each individual metadata btree, i.e. the catalog, overflow extents and
 * attributes btrees.  This information is not applicable to the allocation
 * file or the journal file.
 */
static errno_t
hfs_fsinfo_metadata_percentfree(struct hfsmount *hfsmp, struct hfs_fsinfo_metadata *fsinfo)
{
	int lockflags = 0;
	int ret_lockflags = 0;
	BTreeControlBlockPtr btreePtr;
	uint32_t free_nodes, total_nodes;

	/*
	 * Getting total and used nodes for all metadata btrees should
	 * be a relatively quick operation, so we grab locks for all the
	 * btrees at the same time
	 */
	lockflags = SFL_CATALOG | SFL_EXTENTS | SFL_BITMAP | SFL_ATTRIBUTE;
	ret_lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_SHARED_LOCK);

	/* Overflow extents btree */
	btreePtr = VTOF(hfsmp->hfs_extents_vp)->fcbBTCBPtr;
	total_nodes = btreePtr->totalNodes;
	free_nodes = btreePtr->freeNodes;
	fsinfo->extents = hfs_percent(free_nodes, total_nodes);

	/* Catalog btree */
	btreePtr = VTOF(hfsmp->hfs_catalog_vp)->fcbBTCBPtr;
	total_nodes = btreePtr->totalNodes;
	free_nodes = btreePtr->freeNodes;
	fsinfo->catalog = hfs_percent(free_nodes, total_nodes);

	/* Attributes btree */
	if (hfsmp->hfs_attribute_vp) {
		btreePtr = VTOF(hfsmp->hfs_attribute_vp)->fcbBTCBPtr;
		total_nodes = btreePtr->totalNodes;
		free_nodes = btreePtr->freeNodes;
		fsinfo->attribute = hfs_percent(free_nodes, total_nodes);
	}

	hfs_systemfile_unlock(hfsmp, ret_lockflags);
	return 0;
}
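/*
 * hfs_percent() is not shown in this excerpt; presumably it is plain integer
 * math along these lines.  This helper is a stand-in written for
 * illustration, not the actual definition from the HFS sources.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
percent(uint32_t part, uint32_t total)
{
	if (total == 0)
		return 0;
	return (uint32_t)(((uint64_t)part * 100) / total);
}

int
main(void)
{
	/* e.g. 1200 free nodes out of 4096 total nodes -> 29% free */
	printf("%u%% free\n", percent(1200, 4096));
	return 0;
}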
/* * Alternative version of fifo_fastoff() * optimized for putmsg/getmsg. */ void fifo_vfastoff(vnode_t *vp) { fifonode_t *fnp = VTOF(vp); mutex_enter(&fnp->fn_lock->flk_lock); if (!(fnp->fn_flag & FIFOFAST)) { mutex_exit(&fnp->fn_lock->flk_lock); return; } fifo_fastoff(fnp); mutex_exit(&fnp->fn_lock->flk_lock); }
/* * Clean up the state of a FIFO and/or mounted pipe in the * event that a fifo_open() was interrupted while the * process was blocked. */ void fifo_cleanup(vnode_t *vp, int flag) { fifonode_t *fnp = VTOF(vp); ASSERT(MUTEX_HELD(&fnp->fn_lock->flk_lock)); cleanlocks(vp, curproc->p_pid, 0); cleanshares(vp, curproc->p_pid); if (flag & FREAD) { fnp->fn_rcnt--; } if (flag & FWRITE) { fnp->fn_wcnt--; } cv_broadcast(&fnp->fn_wait_cv); }
/*ARGSUSED*/ int connclose(queue_t *q, int cflag, cred_t *crp) { vnode_t *streamvp; fifonode_t *streamfnp; qprocsoff(q); streamvp = strq2vp(q); ASSERT(streamvp != NULL); ASSERT(streamvp->v_type == VFIFO); streamfnp = VTOF(streamvp); streamfnp->fn_flag &= ~FIFOCONNLD; VN_RELE(streamvp); return (0); }
/*ARGSUSED*/ int connopen(queue_t *rqp, dev_t *devp, int flag, int sflag, cred_t *crp) { int error = 0; vnode_t *streamvp; fifonode_t *streamfnp; if ((streamvp = strq2vp(rqp)) == NULL) { return (EINVAL); } /* * CONNLD is only allowed to be pushed onto a "pipe" that has both * of its ends open. */ if (streamvp->v_type != VFIFO) { error = EINVAL; goto out; } streamfnp = VTOF(streamvp); if (!(streamfnp->fn_flag & ISPIPE) || streamfnp->fn_dest->fn_open == 0) { error = EPIPE; goto out; } /* * If this is the first time CONNLD was opened while on this stream, * it is being pushed. Therefore, set a flag and return 0. */ if (rqp->q_ptr == 0) { if (streamfnp->fn_flag & FIFOCONNLD) { error = ENXIO; goto out; } rqp->q_ptr = (caddr_t)1; streamfnp->fn_flag |= FIFOCONNLD; qprocson(rqp); } out: VN_RELE(streamvp); return (error); }
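/*
 * Userland sketch of how connld is typically driven on Solaris-style systems
 * (added for illustration; the path name is made up and error handling is
 * trimmed).  The server pushes connld onto one end of a pipe and fattach()es
 * it; connopen() above then runs, and every client open() of that name hands
 * the server a fresh pipe end via I_RECVFD.
 */
#include <stropts.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	int pfd[2];
	struct strrecvfd recv;

	if (pipe(pfd) == -1)
		return (1);
	if (ioctl(pfd[1], I_PUSH, "connld") == -1)	/* sets FIFOCONNLD */
		return (1);
	if (fattach(pfd[1], "/tmp/server.pipe") == -1)	/* name the pipe end */
		return (1);
	/* Each client open("/tmp/server.pipe", O_RDWR) yields one new fd here. */
	if (ioctl(pfd[1], I_RECVFD, &recv) == -1)
		return (1);
	printf("new connection fd %d from uid %ld\n", recv.fd, (long)recv.uid);
	return (0);
}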
static int ntfs_read(void *v) { struct vop_read_args /* { struct vnode *a_vp; struct uio *a_uio; int a_ioflag; kauth_cred_t a_cred; } */ *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct uio *uio = ap->a_uio; struct ntfsmount *ntmp = ip->i_mp; u_int64_t toread; int error; dprintf(("ntfs_read: ino: %llu, off: %qd resid: %qd\n", (unsigned long long)ip->i_number, (long long)uio->uio_offset, (long long)uio->uio_resid)); dprintf(("ntfs_read: filesize: %qu",(long long)fp->f_size)); /* don't allow reading after end of file */ if (uio->uio_offset > fp->f_size) toread = 0; else toread = MIN(uio->uio_resid, fp->f_size - uio->uio_offset ); dprintf((", toread: %qu\n",(long long)toread)); if (toread == 0) return (0); error = ntfs_readattr(ntmp, ip, fp->f_attrtype, fp->f_attrname, uio->uio_offset, toread, NULL, uio); if (error) { printf("ntfs_read: ntfs_readattr failed: %d\n",error); return (error); } return (0); }
/* * Reclaim an fnode/ntnode so that it can be used for other purposes. */ int ntfs_reclaim(void *v) { struct vop_reclaim_args /* { struct vnode *a_vp; } */ *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); const int attrlen = strlen(fp->f_attrname); int error; dprintf(("ntfs_reclaim: vnode: %p, ntnode: %llu\n", vp, (unsigned long long)ip->i_number)); if (prtactive && vp->v_usecount > 1) vprint("ntfs_reclaim: pushing active", vp); if ((error = ntfs_ntget(ip)) != 0) return (error); vcache_remove(vp->v_mount, fp->f_key, NTKEY_SIZE(attrlen)); if (ip->i_devvp) { vrele(ip->i_devvp); ip->i_devvp = NULL; } genfs_node_destroy(vp); vp->v_data = NULL; /* Destroy fnode. */ if (fp->f_key != &fp->f_smallkey) kmem_free(fp->f_key, NTKEY_SIZE(attrlen)); if (fp->f_dirblbuf) free(fp->f_dirblbuf, M_NTFSDIR); kmem_free(fp, sizeof(*fp)); ntfs_ntrele(ip); ntfs_ntput(ip); return (0); }
static int ntfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) { struct ntfsmount *ntmp = VFSTONTFS(mp); u_int64_t mftallocated; dprintf(("ntfs_statvfs():\n")); mftallocated = VTOF(ntmp->ntm_sysvn[NTFS_MFTINO])->f_allocated; sbp->f_type = mp->mnt_vfc->vfc_typenum; sbp->f_bsize = ntmp->ntm_bps; sbp->f_blocks = ntmp->ntm_bootfile.bf_spv; sbp->f_bfree = sbp->f_bavail = ntmp->ntm_cfree * ntmp->ntm_spc; sbp->f_ffree = sbp->f_bfree / ntmp->ntm_bpmftrec; sbp->f_files = mftallocated / (ntmp->ntm_bpmftrec * ntmp->ntm_bps) + sbp->f_ffree; return (0); }
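/*
 * Worked example (illustrative only, invented volume parameters) of the
 * statvfs arithmetic above: free clusters are scaled to f_bsize-sized
 * sectors, and one MFT record covers ntm_bpmftrec sectors.
 */
#include <stdio.h>

int
main(void)
{
	unsigned long long bps = 512;		/* bytes per sector (f_bsize) */
	unsigned long long spc = 8;		/* sectors per cluster */
	unsigned long long bpmftrec = 2;	/* sectors per MFT record */
	unsigned long long cfree = 1000;	/* free clusters */
	unsigned long long mftalloc = 1 << 20;	/* bytes allocated to the MFT */

	unsigned long long bfree = cfree * spc;		/* free sectors */
	unsigned long long ffree = bfree / bpmftrec;	/* MFT records that would fit */
	unsigned long long files = mftalloc / (bpmftrec * bps) + ffree;

	printf("f_bfree=%llu f_ffree=%llu f_files=%llu\n", bfree, ffree, files);
	return (0);
}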
static int ntfs_write(void *v) { struct vop_write_args /* { struct vnode *a_vp; struct uio *a_uio; int a_ioflag; kauth_cred_t a_cred; } */ *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct uio *uio = ap->a_uio; struct ntfsmount *ntmp = ip->i_mp; u_int64_t towrite; size_t written; int error; dprintf(("ntfs_write: ino: %llu, off: %qd resid: %qd\n", (unsigned long long)ip->i_number, (long long)uio->uio_offset, (long long)uio->uio_resid)); dprintf(("ntfs_write: filesize: %qu",(long long)fp->f_size)); if (uio->uio_resid + uio->uio_offset > fp->f_size) { printf("ntfs_write: CAN'T WRITE BEYOND END OF FILE\n"); return (EFBIG); } towrite = MIN(uio->uio_resid, fp->f_size - uio->uio_offset); dprintf((", towrite: %qu\n",(long long)towrite)); error = ntfs_writeattr_plain(ntmp, ip, fp->f_attrtype, fp->f_attrname, uio->uio_offset, towrite, NULL, &written, uio); #ifdef NTFS_DEBUG if (error) printf("ntfs_write: ntfs_writeattr failed: %d\n", error); #endif return (error); }
int ntfs_getattr(void *v) { struct vop_getattr_args *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct vattr *vap = ap->a_vap; DPRINTF("ntfs_getattr: %u, flags: %u\n", ip->i_number, ip->i_flag); vap->va_fsid = ip->i_dev; vap->va_fileid = ip->i_number; vap->va_mode = ip->i_mp->ntm_mode; vap->va_nlink = ip->i_nlink; vap->va_uid = ip->i_mp->ntm_uid; vap->va_gid = ip->i_mp->ntm_gid; vap->va_rdev = 0; /* XXX UNODEV ? */ vap->va_size = fp->f_size; vap->va_bytes = fp->f_allocated; vap->va_atime = ntfs_nttimetounix(fp->f_times.t_access); vap->va_mtime = ntfs_nttimetounix(fp->f_times.t_write); vap->va_ctime = ntfs_nttimetounix(fp->f_times.t_create); vap->va_flags = ip->i_flag; vap->va_gen = 0; vap->va_blocksize = ip->i_mp->ntm_spc * ip->i_mp->ntm_bps; vap->va_type = vp->v_type; vap->va_filerev = 0; /* * Ensure that a directory link count is always 1 so that things * like fts_read() do not try to be smart and end up skipping over * directories. Additionally, ip->i_nlink will not be initialised * until the ntnode has been loaded for the file. */ if (vp->v_type == VDIR || ip->i_nlink < 1) vap->va_nlink = 1; return (0); }
int ntfs_read(void *v) { struct vop_read_args *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct uio *uio = ap->a_uio; struct ntfsmount *ntmp = ip->i_mp; u_int64_t toread; int error; DPRINTF("ntfs_read: ino: %u, off: %lld resid: %zu, segflg: %d\n", ip->i_number, uio->uio_offset, uio->uio_resid, uio->uio_segflg); DPRINTF("ntfs_read: filesize: %llu", fp->f_size); /* don't allow reading after end of file */ if (uio->uio_offset > fp->f_size) toread = 0; else toread = MIN(uio->uio_resid, fp->f_size - uio->uio_offset); DPRINTF(", toread: %llu\n", toread); if (toread == 0) return (0); error = ntfs_readattr(ntmp, ip, fp->f_attrtype, fp->f_attrname, uio->uio_offset, toread, NULL, uio); if (error) { printf("ntfs_read: ntfs_readattr failed: %d\n",error); return (error); } return (0); }
/*
 * Stream a pipe/FIFO.
 * The FIFOCONNLD flag is used when CONNLD has been pushed on the stream.
 * If the flag is set, a new vnode is created by calling fifo_connld().
 * Connld logic was moved to fifo_connld() to speed up the open
 * operation, simplify the connld/fifo interaction, and remove inherent
 * race conditions between the connld module and fifos.
 * This routine is single threaded for two reasons.
 * 1) connld requests are synchronous; that is, they must block
 *    until the server does an I_RECVFD (oh, well).  Single threading is
 *    the simplest way to accomplish this.
 * 2) fifo_close() must not send M_HANGUP or M_ERROR while we are
 *    in stropen.  Stropen() has a tendency to reset things and
 *    we would like streams to remember that a hangup occurred.
 */
int
fifo_stropen(vnode_t **vpp, int flag, cred_t *crp, int dotwist, int lockheld)
{
	int error = 0;
	vnode_t *oldvp = *vpp;
	fifonode_t *fnp = VTOF(*vpp);
	dev_t pdev = 0;
	int firstopen = 0;
	fifolock_t *fn_lock;

	fn_lock = fnp->fn_lock;
	if (!lockheld)
		mutex_enter(&fn_lock->flk_lock);
	ASSERT(MUTEX_HELD(&fnp->fn_lock->flk_lock));

	/*
	 * FIFO is in the process of opening.  Wait for it
	 * to complete before starting another open on it.
	 * This prevents races associated with connld open.
	 */
	while (fnp->fn_flag & FIFOOPEN) {
		if (!cv_wait_sig(&fnp->fn_wait_cv, &fn_lock->flk_lock)) {
			fifo_cleanup(oldvp, flag);
			if (!lockheld)
				mutex_exit(&fn_lock->flk_lock);
			return (EINTR);
		}
	}

	/*
	 * The other end of the pipe is almost closed, so
	 * reject any other open on this end of the pipe.
	 * This only happens with a pipe mounted under namefs.
	 */
	if ((fnp->fn_flag & (FIFOCLOSE|ISPIPE)) == (FIFOCLOSE|ISPIPE)) {
		fifo_cleanup(oldvp, flag);
		cv_broadcast(&fnp->fn_wait_cv);
		if (!lockheld)
			mutex_exit(&fn_lock->flk_lock);
		return (ENXIO);
	}

	fnp->fn_flag |= FIFOOPEN;

	/*
	 * Can't allow close to happen while we are
	 * in the middle of stropen().
	 * M_HANGUP and M_ERROR could leave the stream in a strange state.
	 */
	while (fn_lock->flk_ocsync)
		cv_wait(&fn_lock->flk_wait_cv, &fn_lock->flk_lock);
	fn_lock->flk_ocsync = 1;

	if (fnp->fn_flag & FIFOCONNLD) {
		/*
		 * This is a reopen, so we should release the fifo lock
		 * just in case some strange module pushed on connld
		 * has some odd side effect.
		 * Note: this stropen is on the oldvp.  It will
		 * have no impact on the connld vp returned and
		 * strclose() will only be called when we release
		 * flk_ocsync.
		 */
		mutex_exit(&fn_lock->flk_lock);
		if ((error = stropen(oldvp, &pdev, flag, crp)) != 0) {
			mutex_enter(&fn_lock->flk_lock);
			fifo_cleanup(oldvp, flag);
			fn_lock->flk_ocsync = 0;
			cv_broadcast(&fn_lock->flk_wait_cv);
			goto out;
		}
		/*
		 * Streams open done, allow close on other end if
		 * required.  Do this now.. it could
		 * be a very long time before fifo_connld returns.
		 */
		mutex_enter(&fn_lock->flk_lock);
		/*
		 * We need to fake an open here so that if this
		 * end of the pipe closes, we don't lose the
		 * stream head (kind of like single threading
		 * open and close for this end of the pipe).
		 * We'll need to call fifo_close() to do clean
		 * up in case this end of the pipe was closed
		 * down while we were in fifo_connld().
		 */
		ASSERT(fnp->fn_open > 0);
		fnp->fn_open++;
		fn_lock->flk_ocsync = 0;
		cv_broadcast(&fn_lock->flk_wait_cv);
		mutex_exit(&fn_lock->flk_lock);
		/*
		 * Connld has been pushed onto the pipe.
		 * Create new pipe on behalf of connld.
		 */
		if (error = fifo_connld(vpp, flag, crp)) {
			(void) fifo_close(oldvp, flag, 1, 0, crp, NULL);
			mutex_enter(&fn_lock->flk_lock);
			goto out;
		}
		/*
		 * Undo fake open.  We need to call fifo_close
		 * because some other thread could have done
		 * a close and detach of the named pipe while
		 * we were in fifo_connld(), so
		 * we want to make sure the close completes (yuk).
		 */
		(void) fifo_close(oldvp, flag, 1, 0, crp, NULL);
		/*
		 * fifo_connld has changed the vp, so we
		 * need to re-initialize locals.
		 */
		fnp = VTOF(*vpp);
		fn_lock = fnp->fn_lock;
		mutex_enter(&fn_lock->flk_lock);
	} else {
		/*
		 * Release lock in case there are modules pushed that
		 * could have some strange side effect.
		 */
		mutex_exit(&fn_lock->flk_lock);

		/*
		 * If this is the first open of a fifo (dotwist
		 * will be non-zero) we will need to twist the queues.
		 */
		if (oldvp->v_stream == NULL)
			firstopen = 1;

		/*
		 * Normal open of pipe/fifo.
		 */
		if ((error = stropen(oldvp, &pdev, flag, crp)) != 0) {
			mutex_enter(&fn_lock->flk_lock);
			fifo_cleanup(oldvp, flag);
			ASSERT(fnp->fn_open != 0 || oldvp->v_stream == NULL);
			fn_lock->flk_ocsync = 0;
			cv_broadcast(&fn_lock->flk_wait_cv);
			goto out;
		}
		mutex_enter(&fn_lock->flk_lock);

		/*
		 * Twist the ends of the fifo together.
		 */
		if (dotwist && firstopen)
			strmate(*vpp, *vpp);

		/*
		 * Show that this open has succeeded
		 * and allow closes or other opens to proceed.
		 */
		fnp->fn_open++;
		fn_lock->flk_ocsync = 0;
		cv_broadcast(&fn_lock->flk_wait_cv);
	}
out:
	fnp->fn_flag &= ~FIFOOPEN;
	if (error == 0) {
		fnp->fn_flag |= FIFOISOPEN;
		/*
		 * If this is a FIFO and has the close flag set
		 * and there are now writers, clear the close flag.
		 * Note: close flag only gets set when last writer
		 * on a FIFO goes away.
		 */
		if (((fnp->fn_flag & (ISPIPE|FIFOCLOSE)) == FIFOCLOSE) &&
		    fnp->fn_wcnt > 0)
			fnp->fn_flag &= ~FIFOCLOSE;
	}
	cv_broadcast(&fnp->fn_wait_cv);
	if (!lockheld)
		mutex_exit(&fn_lock->flk_lock);
	return (error);
}
/* * Calculate the logical to physical mapping if not done already, * then call the device strategy routine. */ int ntfs_strategy(void *v) { struct vop_strategy_args /* { struct vnode *a_vp; struct buf *a_bp; } */ *ap = v; struct buf *bp = ap->a_bp; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct ntfsmount *ntmp = ip->i_mp; int error; dprintf(("ntfs_strategy: blkno: %d, lblkno: %d\n", (u_int32_t)bp->b_blkno, (u_int32_t)bp->b_lblkno)); dprintf(("strategy: bcount: %u flags: 0x%x\n", (u_int32_t)bp->b_bcount,bp->b_flags)); if (bp->b_flags & B_READ) { u_int32_t toread; if (ntfs_cntob(bp->b_blkno) >= fp->f_size) { clrbuf(bp); error = 0; } else { toread = MIN(bp->b_bcount, fp->f_size - ntfs_cntob(bp->b_blkno)); dprintf(("ntfs_strategy: toread: %d, fsize: %d\n", toread,(u_int32_t)fp->f_size)); error = ntfs_readattr(ntmp, ip, fp->f_attrtype, fp->f_attrname, ntfs_cntob(bp->b_blkno), toread, bp->b_data, NULL); if (error) { printf("ntfs_strategy: ntfs_readattr failed\n"); bp->b_error = error; } memset((char *)bp->b_data + toread, 0, bp->b_bcount - toread); } } else { size_t tmp; u_int32_t towrite; if (ntfs_cntob(bp->b_blkno) + bp->b_bcount >= fp->f_size) { printf("ntfs_strategy: CAN'T EXTEND FILE\n"); bp->b_error = error = EFBIG; } else { towrite = MIN(bp->b_bcount, fp->f_size - ntfs_cntob(bp->b_blkno)); dprintf(("ntfs_strategy: towrite: %d, fsize: %d\n", towrite,(u_int32_t)fp->f_size)); error = ntfs_writeattr_plain(ntmp, ip, fp->f_attrtype, fp->f_attrname, ntfs_cntob(bp->b_blkno),towrite, bp->b_data, &tmp, NULL); if (error) { printf("ntfs_strategy: ntfs_writeattr fail\n"); bp->b_error = error; } } } biodone(bp); return (error); }
/* * Note: This routine is single threaded * Protected by FIFOOPEN flag (i.e. flk_lock is not held) * Upon successful completion, the original fifo is unlocked * and FIFOOPEN is cleared for the original vpp. * The new fifo returned has FIFOOPEN set. */ static int fifo_connld(struct vnode **vpp, int flag, cred_t *crp) { struct vnode *vp1; struct vnode *vp2; struct fifonode *oldfnp; struct fifonode *fn_dest; int error; struct file *filep; struct fifolock *fn_lock; cred_t *c; /* * Get two vnodes that will represent the pipe ends for the new pipe. */ makepipe(&vp1, &vp2); /* * Allocate a file descriptor and file pointer for one of the pipe * ends. The file descriptor will be used to send that pipe end to * the process on the other end of this stream. Note that we get * the file structure only, there is no file list entry allocated. */ if (error = falloc(vp1, FWRITE|FREAD, &filep, NULL)) { VN_RELE(vp1); VN_RELE(vp2); return (error); } mutex_exit(&filep->f_tlock); oldfnp = VTOF(*vpp); fn_lock = oldfnp->fn_lock; fn_dest = oldfnp->fn_dest; /* * Create two new stream heads and attach them to the two vnodes for * the new pipe. */ if ((error = fifo_stropen(&vp1, FREAD|FWRITE, filep->f_cred, 0, 0)) != 0 || (error = fifo_stropen(&vp2, flag, filep->f_cred, 0, 0)) != 0) { #if DEBUG cmn_err(CE_NOTE, "fifo stropen failed error 0x%x", error); #endif /* * this will call fifo_close and VN_RELE on vp1 */ (void) closef(filep); VN_RELE(vp2); return (error); } /* * twist the ends of the pipe together */ strmate(vp1, vp2); /* * Set our end to busy in open * Note: Don't need lock around this because we're the only * one who knows about it */ VTOF(vp2)->fn_flag |= FIFOOPEN; mutex_enter(&fn_lock->flk_lock); fn_dest->fn_flag |= FIFOSEND; /* * check to make sure neither end of pipe has gone away */ if (!(fn_dest->fn_flag & FIFOISOPEN)) { error = ENXIO; fn_dest->fn_flag &= ~FIFOSEND; mutex_exit(&fn_lock->flk_lock); /* * this will call fifo_close and VN_RELE on vp1 */ goto out; } mutex_exit(&fn_lock->flk_lock); /* * Tag the sender's credential on the pipe descriptor. */ crhold(VTOF(vp1)->fn_pcredp = crp); VTOF(vp1)->fn_cpid = curproc->p_pid; /* * send the file descriptor to other end of pipe */ if (error = do_sendfp((*vpp)->v_stream, filep, crp)) { mutex_enter(&fn_lock->flk_lock); fn_dest->fn_flag &= ~FIFOSEND; mutex_exit(&fn_lock->flk_lock); /* * this will call fifo_close and VN_RELE on vp1 */ goto out; } mutex_enter(&fn_lock->flk_lock); /* * Wait for other end to receive file descriptor * FIFOCLOSE indicates that one or both sides of the pipe * have gone away. */ while ((fn_dest->fn_flag & (FIFOCLOSE | FIFOSEND)) == FIFOSEND) { if (!cv_wait_sig(&oldfnp->fn_wait_cv, &fn_lock->flk_lock)) { error = EINTR; fn_dest->fn_flag &= ~FIFOSEND; mutex_exit(&fn_lock->flk_lock); goto out; } } /* * If either end of pipe has gone away and the other end did not * receive pipe, reject the connld open */ if ((fn_dest->fn_flag & FIFOSEND)) { error = ENXIO; fn_dest->fn_flag &= ~FIFOSEND; mutex_exit(&fn_lock->flk_lock); goto out; } oldfnp->fn_flag &= ~FIFOOPEN; cv_broadcast(&oldfnp->fn_wait_cv); mutex_exit(&fn_lock->flk_lock); VN_RELE(*vpp); *vpp = vp2; (void) closef(filep); return (0); out: c = filep->f_cred; crhold(c); (void) closef(filep); VTOF(vp2)->fn_flag &= ~FIFOOPEN; (void) fifo_close(vp2, flag, 1, (offset_t)0, c, NULL); crfree(c); VN_RELE(vp2); return (error); }
/* * pipe(2) system call. * Create a pipe by connecting two streams together. Associate * each end of the pipe with a vnode, a file descriptor and * one of the streams. */ longlong_t pipe() { vnode_t *vp1, *vp2; struct file *fp1, *fp2; int error = 0; int fd1, fd2; rval_t r; /* * Allocate and initialize two vnodes. */ makepipe(&vp1, &vp2); /* * Allocate and initialize two file table entries and two * file pointers. Each file pointer is open for read and * write. */ if (error = falloc(vp1, FWRITE|FREAD, &fp1, &fd1)) { VN_RELE(vp1); VN_RELE(vp2); return ((longlong_t)set_errno(error)); } if (error = falloc(vp2, FWRITE|FREAD, &fp2, &fd2)) goto out2; /* * Create two stream heads and attach to each vnode. */ if (error = fifo_stropen(&vp1, FWRITE|FREAD, fp1->f_cred, 0, 0)) goto out; if (error = fifo_stropen(&vp2, FWRITE|FREAD, fp2->f_cred, 0, 0)) { (void) VOP_CLOSE(vp1, FWRITE|FREAD, 1, (offset_t)0, fp1->f_cred); goto out; } strmate(vp1, vp2); VTOF(vp1)->fn_ino = VTOF(vp2)->fn_ino = fifogetid(); /* * Now fill in the entries that falloc reserved */ mutex_exit(&fp1->f_tlock); mutex_exit(&fp2->f_tlock); setf(fd1, fp1); setf(fd2, fp2); /* * Return the file descriptors to the user. They now * point to two different vnodes which have different * stream heads. */ r.r_val1 = fd1; r.r_val2 = fd2; return (r.r_vals); out: unfalloc(fp2); setf(fd2, NULL); out2: unfalloc(fp1); setf(fd1, NULL); VN_RELE(vp1); VN_RELE(vp2); return ((longlong_t)set_errno(error)); }
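/*
 * Minimal userland counterpart of the pipe() system call above (added as a
 * usage example).  On SVR4-derived systems both descriptors are opened
 * FREAD|FWRITE, matching the falloc() calls in the kernel code.
 */
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd[2];
	char buf[8];

	if (pipe(fd) == -1) {
		perror("pipe");
		return (1);
	}
	(void) write(fd[1], "hello", 5);
	if (read(fd[0], buf, sizeof(buf)) == 5)
		printf("got %.5s\n", buf);
	(void) close(fd[0]);
	(void) close(fd[1]);
	return (0);
}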
/* * Calculate the logical to physical mapping if not done already, * then call the device strategy routine. */ int ntfs_strategy(void *v) { struct vop_strategy_args *ap = v; struct buf *bp = ap->a_bp; struct vnode *vp = bp->b_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct ntfsmount *ntmp = ip->i_mp; int error, s; DPRINTF("ntfs_strategy: blkno: %lld, lblkno: %lld\n", (long long)bp->b_blkno, (long long)bp->b_lblkno); DPRINTF("strategy: bcount: %ld flags: 0x%lx\n", bp->b_bcount, bp->b_flags); if (bp->b_flags & B_READ) { u_int32_t toread; if (ntfs_cntob(bp->b_blkno) >= fp->f_size) { clrbuf(bp); error = 0; } else { toread = MIN(bp->b_bcount, fp->f_size - ntfs_cntob(bp->b_blkno)); DPRINTF("ntfs_strategy: toread: %u, fsize: %llu\n", toread, fp->f_size); error = ntfs_readattr(ntmp, ip, fp->f_attrtype, fp->f_attrname, ntfs_cntob(bp->b_blkno), toread, bp->b_data, NULL); if (error) { printf("ntfs_strategy: ntfs_readattr failed\n"); bp->b_error = error; bp->b_flags |= B_ERROR; } bzero(bp->b_data + toread, bp->b_bcount - toread); } } else { size_t tmp; u_int32_t towrite; if (ntfs_cntob(bp->b_blkno) + bp->b_bcount >= fp->f_size) { printf("ntfs_strategy: CAN'T EXTEND FILE\n"); bp->b_error = error = EFBIG; bp->b_flags |= B_ERROR; } else { towrite = MIN(bp->b_bcount, fp->f_size - ntfs_cntob(bp->b_blkno)); DPRINTF("ntfs_strategy: towrite: %u, fsize: %llu\n", towrite, fp->f_size); error = ntfs_writeattr_plain(ntmp, ip, fp->f_attrtype, fp->f_attrname, ntfs_cntob(bp->b_blkno),towrite, bp->b_data, &tmp, NULL); if (error) { printf("ntfs_strategy: ntfs_writeattr fail\n"); bp->b_error = error; bp->b_flags |= B_ERROR; } } } s = splbio(); biodone(bp); splx(s); return (error); }
int ntfs_readdir(void *v) { struct vop_readdir_args *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct uio *uio = ap->a_uio; struct ntfsmount *ntmp = ip->i_mp; int i, error = 0; u_int32_t faked = 0, num; struct dirent cde; off_t off; DPRINTF("ntfs_readdir %u off: %lld resid: %zu\n", ip->i_number, uio->uio_offset, uio->uio_resid); off = uio->uio_offset; memset(&cde, 0, sizeof(cde)); /* Simulate . in every dir except ROOT */ if (ip->i_number != NTFS_ROOTINO && uio->uio_offset == 0) { cde.d_fileno = ip->i_number; cde.d_reclen = sizeof(struct dirent); cde.d_type = DT_DIR; cde.d_namlen = 1; cde.d_off = sizeof(struct dirent); cde.d_name[0] = '.'; cde.d_name[1] = '\0'; error = uiomove(&cde, sizeof(struct dirent), uio); if (error) goto out; } /* Simulate .. in every dir including ROOT */ if (uio->uio_offset < 2 * sizeof(struct dirent)) { cde.d_fileno = NTFS_ROOTINO; /* XXX */ cde.d_reclen = sizeof(struct dirent); cde.d_type = DT_DIR; cde.d_namlen = 2; cde.d_off = 2 * sizeof(struct dirent); cde.d_name[0] = '.'; cde.d_name[1] = '.'; cde.d_name[2] = '\0'; error = uiomove(&cde, sizeof(struct dirent), uio); if (error) goto out; } faked = (ip->i_number == NTFS_ROOTINO) ? 1 : 2; num = uio->uio_offset / sizeof(struct dirent) - faked; while (uio->uio_resid >= sizeof(struct dirent)) { struct attr_indexentry *iep; char *fname; size_t remains; int sz; error = ntfs_ntreaddir(ntmp, fp, num, &iep, uio->uio_procp); if (error) goto out; if (NULL == iep) break; for(; !(iep->ie_flag & NTFS_IEFLAG_LAST) && (uio->uio_resid >= sizeof(struct dirent)); iep = NTFS_NEXTREC(iep, struct attr_indexentry *)) { if(!ntfs_isnamepermitted(ntmp,iep)) continue; remains = sizeof(cde.d_name) - 1; fname = cde.d_name; for(i=0; i<iep->ie_fnamelen; i++) { sz = (*ntmp->ntm_wput)(fname, remains, iep->ie_fname[i]); fname += sz; remains -= sz; } *fname = '\0'; DPRINTF("ntfs_readdir: elem: %u, fname:[%s] type: %u, " "flag: %u, ", num, cde.d_name, iep->ie_fnametype, iep->ie_flag); cde.d_namlen = fname - (char *) cde.d_name; cde.d_fileno = iep->ie_number; cde.d_type = (iep->ie_fflag & NTFS_FFLAG_DIR) ? DT_DIR : DT_REG; cde.d_reclen = sizeof(struct dirent); cde.d_off = uio->uio_offset + sizeof(struct dirent); DPRINTF("%s\n", cde.d_type == DT_DIR ? "dir" : "reg"); error = uiomove(&cde, sizeof(struct dirent), uio); if (error) goto out; num++; } } DPRINTF("ntfs_readdir: %u entries (%lld bytes) read\n", num, uio->uio_offset - off); DPRINTF("ntfs_readdir: off: %lld resid: %zu\n", uio->uio_offset, uio->uio_resid); /* if (ap->a_eofflag) *ap->a_eofflag = VTONT(ap->a_vp)->i_size <= uio->uio_offset; */ out: if (fp->f_dirblbuf != NULL) { free(fp->f_dirblbuf, M_NTFSDIR, 0); fp->f_dirblbuf = NULL; } return (error); }
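/*
 * Small illustration (not from the driver) of the offset bookkeeping used by
 * ntfs_readdir() above: directory offsets advance in whole struct dirent
 * steps, and the first one or two slots are the synthesized "." and ".."
 * entries.  The entry size and resume offset below are invented numbers.
 */
#include <stdio.h>

int
main(void)
{
	unsigned long long entsize = 512;		/* stand-in for sizeof(struct dirent) */
	unsigned long long offset = 5 * entsize;	/* resume point from an earlier read */
	int is_root = 0;				/* non-root dirs synthesize "." and ".." */
	unsigned long long faked = is_root ? 1 : 2;
	unsigned long long num = offset / entsize - faked;

	/* Index of the next real index entry to ask ntfs_ntreaddir() for. */
	printf("next on-disk entry: %llu\n", num);
	return (0);
}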
/*
 * Function to traverse all the records of a btree and then call the
 * caller-provided callback function for every record found.  The type of
 * btree is chosen based on the fileID provided by the caller.  This function
 * grabs the correct locks depending on the type of btree it will be
 * traversing and the flags provided by the caller.
 *
 * Note: It might drop and reacquire the locks during execution.
 */
static errno_t
traverse_btree(struct hfsmount *hfsmp, uint32_t btree_fileID,
    traverse_btree_flag_t flags, void *fsinfo,
    int (*callback)(struct hfsmount *, HFSPlusKey *, HFSPlusRecord *, void *))
{
	int error = 0;
	int lockflags = 0;
	int ret_lockflags = 0;
	FCB *fcb;
	struct BTreeIterator *iterator = NULL;
	struct FSBufferDescriptor btdata;
	int btree_operation;
	HFSPlusRecord record;
	HFSPlusKey *key;
	uint64_t start, timeout_abs;

	switch(btree_fileID) {
	case kHFSExtentsFileID:
		fcb = VTOF(hfsmp->hfs_extents_vp);
		lockflags = SFL_EXTENTS;
		break;
	case kHFSCatalogFileID:
		fcb = VTOF(hfsmp->hfs_catalog_vp);
		lockflags = SFL_CATALOG;
		break;
	case kHFSAttributesFileID:
		// Attributes file doesn't exist; there are no records to iterate.
		if (hfsmp->hfs_attribute_vp == NULL)
			return error;
		fcb = VTOF(hfsmp->hfs_attribute_vp);
		lockflags = SFL_ATTRIBUTE;
		break;
	default:
		return EINVAL;
	}

	MALLOC(iterator, struct BTreeIterator *, sizeof(struct BTreeIterator), M_TEMP, M_WAITOK | M_ZERO);

	/* The key is initialized to zero because we are traversing entire btree */
	key = (HFSPlusKey *)&iterator->key;

	if (flags & TRAVERSE_BTREE_EXTENTS) {
		lockflags |= SFL_EXTENTS;
	}

	btdata.bufferAddress = &record;
	btdata.itemSize = sizeof(HFSPlusRecord);
	btdata.itemCount = 1;

	/* Lock btree for duration of traversal */
	ret_lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_SHARED_LOCK);
	btree_operation = kBTreeFirstRecord;

	nanoseconds_to_absolutetime(HFS_FSINFO_MAX_LOCKHELD_TIME, &timeout_abs);
	start = mach_absolute_time();

	while (1) {
		if (msleep(NULL, NULL, PINOD | PCATCH, "hfs_fsinfo", NULL) == EINTR) {
			error = EINTR;
			break;
		}

		error = BTIterateRecord(fcb, btree_operation, iterator, &btdata, NULL);
		if (error != 0) {
			if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) {
				error = 0;
			}
			break;
		}
		/* Lookup next btree record on next call to BTIterateRecord() */
		btree_operation = kBTreeNextRecord;

		/* Call our callback function and stop iteration if there are any errors */
		error = callback(hfsmp, key, &record, fsinfo);
		if (error) {
			break;
		}

		/* let someone else use the tree after we've processed over HFS_FSINFO_MAX_LOCKHELD_TIME */
		if ((mach_absolute_time() - start) >= timeout_abs) {

			/* release b-tree locks and let someone else get the lock */
			hfs_systemfile_unlock (hfsmp, ret_lockflags);

			/* add tsleep here to force context switch and fairness */
			tsleep((caddr_t)hfsmp, PRIBIO, "hfs_fsinfo", 1);

			/*
			 * re-acquire the locks in the same way that we wanted them originally.
			 * note: it is subtle but worth pointing out that in between the time that we
			 * released and now want to re-acquire these locks that the b-trees may have shifted
			 * slightly but significantly. For example, the catalog or other b-tree could have grown
			 * past 8 extents and now requires the extents lock to be held in order to be safely
			 * manipulated. We can't be sure of the state of the b-tree from where we last left off.
			 */
			ret_lockflags = hfs_systemfile_lock (hfsmp, lockflags, HFS_SHARED_LOCK);

			/*
			 * It's highly likely that the search key we stashed away before dropping the lock
			 * no longer points to an existing item.  The iterator's IterateRecord is able to
			 * re-position itself and process the next record correctly.  With the lock dropped,
			 * there might be records missed for statistic gathering, which is ok.  The
			 * point is to get aggregate values.
			 */
			start = mach_absolute_time();

			/* loop back around and get another record */
		}
	}

	hfs_systemfile_unlock(hfsmp, ret_lockflags);
	FREE (iterator, M_TEMP);
	return MacToVFSError(error);
}
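/*
 * Hypothetical callback matching the signature traverse_btree() expects,
 * added purely for illustration: it counts visited records.  The
 * record_counter type, field, and function names are inventions, and the
 * usage comment at the end only sketches how such a callback might be passed
 * in; none of this comes from the HFS sources.
 */
struct record_counter {
	uint64_t count;			/* records visited so far */
};

static int
count_records_cb(struct hfsmount *hfsmp, HFSPlusKey *key,
    HFSPlusRecord *record, void *fsinfo)
{
	struct record_counter *rc = fsinfo;

	rc->count++;
	return 0;			/* non-zero would stop the traversal */
}

/*
 * Possible use: count every record in the catalog btree.
 *
 *	struct record_counter rc = { 0 };
 *	error = traverse_btree(hfsmp, kHFSCatalogFileID, 0, &rc, count_records_cb);
 */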
int ntfs_readdir(void *v) { struct vop_readdir_args /* { struct vnode *a_vp; struct uio *a_uio; kauth_cred_t a_cred; int *a_ncookies; u_int **cookies; } */ *ap = v; struct vnode *vp = ap->a_vp; struct fnode *fp = VTOF(vp); struct ntnode *ip = FTONT(fp); struct uio *uio = ap->a_uio; struct ntfsmount *ntmp = ip->i_mp; int i, error = 0; u_int32_t faked = 0, num; int ncookies = 0; struct dirent *cde; off_t off; dprintf(("ntfs_readdir %llu off: %qd resid: %qd\n", (unsigned long long)ip->i_number, (long long)uio->uio_offset, (long long)uio->uio_resid)); off = uio->uio_offset; cde = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); /* Simulate . in every dir except ROOT */ if (ip->i_number != NTFS_ROOTINO && uio->uio_offset < sizeof(struct dirent)) { cde->d_fileno = ip->i_number; cde->d_reclen = sizeof(struct dirent); cde->d_type = DT_DIR; cde->d_namlen = 1; strncpy(cde->d_name, ".", 2); error = uiomove((void *)cde, sizeof(struct dirent), uio); if (error) goto out; ncookies++; } /* Simulate .. in every dir including ROOT */ if (uio->uio_offset < 2 * sizeof(struct dirent)) { cde->d_fileno = NTFS_ROOTINO; /* XXX */ cde->d_reclen = sizeof(struct dirent); cde->d_type = DT_DIR; cde->d_namlen = 2; strncpy(cde->d_name, "..", 3); error = uiomove((void *) cde, sizeof(struct dirent), uio); if (error) goto out; ncookies++; } faked = (ip->i_number == NTFS_ROOTINO) ? 1 : 2; num = uio->uio_offset / sizeof(struct dirent) - faked; while (uio->uio_resid >= sizeof(struct dirent)) { struct attr_indexentry *iep; char *fname; size_t remains; int sz; error = ntfs_ntreaddir(ntmp, fp, num, &iep); if (error) goto out; if (NULL == iep) break; for(; !(iep->ie_flag & NTFS_IEFLAG_LAST) && (uio->uio_resid >= sizeof(struct dirent)); iep = NTFS_NEXTREC(iep, struct attr_indexentry *)) { if(!ntfs_isnamepermitted(ntmp,iep)) continue; remains = sizeof(cde->d_name) - 1; fname = cde->d_name; for(i=0; i<iep->ie_fnamelen; i++) { sz = (*ntmp->ntm_wput)(fname, remains, iep->ie_fname[i]); fname += sz; remains -= sz; } *fname = '\0'; dprintf(("ntfs_readdir: elem: %d, fname:[%s] type: %d, flag: %d, ", num, cde->d_name, iep->ie_fnametype, iep->ie_flag)); cde->d_namlen = fname - (char *) cde->d_name; cde->d_fileno = iep->ie_number; cde->d_type = (iep->ie_fflag & NTFS_FFLAG_DIR) ? DT_DIR : DT_REG; cde->d_reclen = sizeof(struct dirent); dprintf(("%s\n", (cde->d_type == DT_DIR) ? "dir":"reg")); error = uiomove((void *)cde, sizeof(struct dirent), uio); if (error) goto out; ncookies++; num++; } } dprintf(("ntfs_readdir: %d entries (%d bytes) read\n", ncookies,(u_int)(uio->uio_offset - off))); dprintf(("ntfs_readdir: off: %qd resid: %qu\n", (long long)uio->uio_offset,(long long)uio->uio_resid)); if (!error && ap->a_ncookies != NULL) { struct dirent* dpStart; struct dirent* dp; off_t *cookies; off_t *cookiep; dprintf(("ntfs_readdir: %d cookies\n",ncookies)); dpStart = (struct dirent *) ((char *)uio->uio_iov->iov_base - (uio->uio_offset - off)); cookies = malloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK); for (dp = dpStart, cookiep = cookies, i=0; i < ncookies; dp = (struct dirent *)((char *) dp + dp->d_reclen), i++) { off += dp->d_reclen; *cookiep++ = (u_int) off; } *ap->a_ncookies = ncookies; *ap->a_cookies = cookies; } /* if (ap->a_eofflag) *ap->a_eofflag = VTONT(ap->a_vp)->i_size <= uio->uio_offset; */ out: free(cde, M_TEMP); return (error); }
/* * This is a helper function that counts the total number of valid * extents in all the overflow extent records for given fileID * in overflow extents btree */ static errno_t hfs_count_overflow_extents(struct hfsmount *hfsmp, uint32_t fileID, uint32_t *num_extents) { int error; FCB *fcb; struct BTreeIterator *iterator = NULL; FSBufferDescriptor btdata; HFSPlusExtentKey *extentKey; HFSPlusExtentRecord extentData; uint32_t extent_count = 0; int i; fcb = VTOF(hfsmp->hfs_extents_vp); MALLOC(iterator, struct BTreeIterator *, sizeof(struct BTreeIterator), M_TEMP, M_WAITOK | M_ZERO); extentKey = (HFSPlusExtentKey *) &iterator->key; extentKey->keyLength = kHFSPlusExtentKeyMaximumLength; extentKey->forkType = kHFSDataForkType; extentKey->fileID = fileID; extentKey->startBlock = 0; btdata.bufferAddress = &extentData; btdata.itemSize = sizeof(HFSPlusExtentRecord); btdata.itemCount = 1; /* Search for overflow extent record */ error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator); /* * We used startBlock of zero, so we will not find any records and errors * are expected. It will also position the iterator just before the first * overflow extent record for given fileID (if any). */ if (error && error != fsBTRecordNotFoundErr && error != fsBTEndOfIterationErr) goto out; error = 0; for (;;) { if (msleep(NULL, NULL, PINOD | PCATCH, "hfs_fsinfo", NULL) == EINTR) { error = EINTR; break; } error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL); if (error != 0) { /* These are expected errors, so mask them */ if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) { error = 0; } break; } /* If we encounter different fileID, stop the iteration */ if (extentKey->fileID != fileID) { break; } if (extentKey->forkType != kHFSDataForkType) break; /* This is our record of interest; only count the datafork extents. */ for (i = 0; i < kHFSPlusExtentDensity; i++) { if (extentData[i].blockCount == 0) { break; } extent_count++; } } out: FREE(iterator, M_TEMP); if (error == 0) { *num_extents = extent_count; } return MacToVFSError(error); }
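/*
 * Self-contained sketch (written for illustration) of the per-record loop
 * above: an HFS+ overflow extent record holds up to eight descriptors
 * (kHFSPlusExtentDensity), and the first descriptor with blockCount == 0
 * ends the record.  The types are re-declared locally with invented values.
 */
#include <stdint.h>
#include <stdio.h>

#define kHFSPlusExtentDensity	8

struct extent_descriptor {
	uint32_t startBlock;
	uint32_t blockCount;
};

int
main(void)
{
	struct extent_descriptor rec[kHFSPlusExtentDensity] = {
		{ 100, 16 }, { 300, 8 }		/* remaining slots are zero-filled */
	};
	int i, extent_count = 0;

	for (i = 0; i < kHFSPlusExtentDensity; i++) {
		if (rec[i].blockCount == 0)
			break;
		extent_count++;
	}
	printf("%d valid extents in this record\n", extent_count);
	return (0);
}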