/* ARGSUSED */
static int
ext2_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	FS *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t nextloffset;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;
	int seqcount = ap->a_ioflag >> 16;

	vp = ap->a_vp;
	ip = VTOI(vp);
	uio = ap->a_uio;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ext2_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ext2_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ext2_read: type %d", vp->v_type);
#endif
	fs = ip->I_FS;
#if 0
	if ((u_quad_t)uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);
#endif

	orig_resid = uio->uio_resid;
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		nextloffset = lblktodoff(fs, nextlbn);
		size = BLKSIZE(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		xfersize = fs->s_frag_size - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		/*
		 * Pick a read strategy: plain bread() when this is the last
		 * block of the file, cluster_read() when read clustering is
		 * enabled, breadn() read-ahead for sequential readers, and
		 * otherwise a plain bread().
		 */
		if (nextloffset >= ip->i_size) {
			error = bread(vp, lblktodoff(fs, lbn), size, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			error = cluster_read(vp, (off_t)ip->i_size,
			    lblktodoff(fs, lbn), size, uio->uio_resid,
			    (ap->a_ioflag >> 16) * BKVASIZE, &bp);
		} else if (seqcount > 1) {
			int nextsize = BLKSIZE(fs, ip, nextlbn);
			error = breadn(vp, lblktodoff(fs, lbn), size,
			    &nextloffset, &nextsize, 1, &bp);
		} else {
			error = bread(vp, lblktodoff(fs, lbn), size, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;
		bqrelse(bp);
	}
	if (bp != NULL)
		bqrelse(bp);
	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}

/* For now, start and len are a number of 512-byte blocks */
int
sd_read(struct sd_card *card, int start, int len, void *dest)
{
	struct dma_cb ctrl __attribute__ ((__aligned__(32)));

	/* Byte-addressed cards (type 0) take a byte offset, not a block number. */
	if (card->type == 0)
		start *= 512;

	dmb();
	*BLKSIZECNT = BLKSIZE(512) | BLKCNT(len);
	sd_send_command(CMD_READ_MULTIPLE_BLOCK,
	    TM_BLKCNT_EN | TM_AUTO_CMD_12 | TM_DAT_CARD_TO_HOST |
	    TM_MULTI_BLOCK | CMD_RSPNS_48 | CMD_ISDATA, start);

	/* DMA from the EMMC data register into the destination buffer. */
	ctrl.ti = DMA_TI_INTEN | DMA_TI_WAIT_RESP | DMA_TI_DEST_INC |
	    DMA_TI_DEST_WIDTH | DMA_TI_SRC_DREQ | DMA_TI_PERMAP_EMMC;
	ctrl.source_ad = IO_TO_BUS(DATA);
	ctrl.dest_ad = virt_to_phy(dest);
	ctrl.txfr_len = 512 * len;
	ctrl.stride = 0;
	ctrl.nextconbk = 0;

	return dma_initiate(DMA_CHAN_EMMC, &ctrl);
}
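/*
 * Hypothetical usage sketch, not part of the driver above: read the first
 * 512-byte block (the MBR) of an already-initialised card.  It assumes a
 * struct sd_card set up elsewhere and a destination buffer placed in memory
 * the DMA controller can reach; the return value is simply whatever
 * sd_read() / dma_initiate() report.
 */
static unsigned char mbr_buf[512] __attribute__ ((__aligned__(32)));

static int
sd_read_mbr(struct sd_card *card)
{
	/* one block, starting at block 0 */
	return sd_read(card, 0, 1, mbr_buf);
}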
/*
 * this function handles ext4 extent block mapping
 */
static int
ext4_ext_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext4_extent nex, *ep;
	struct ext4_extent_header *ehp;
	struct ext4_extent_path path;
	daddr_t lbn, nextlbn, newblk = 0;
	off_t bytesinfile;
	u_short mode;
	int cache_type;
	int orig_resid;
	int error = 0;
	int depth = 0;
	long size, xfersize, blkoffset;

	vp = ap->a_vp;
	ip = VTOI(vp);
	mode = ip->i_mode;
	uio = ap->a_uio;
	memset(&path, 0, sizeof(path));

	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ext2_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ext2_read: uio->uio_offset < 0"));
	fs = ip->I_FS;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->e2fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = BLKSIZE(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		xfersize = fs->e2fs_fsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		/* get block from ext4 extent cache */
		cache_type = ext4_ext_in_cache(ip, lbn, &nex);
		if (cache_type != 0) {
			/* the block has not been allocated yet */
			if (cache_type == EXT4_EXT_CACHE_GAP)
				return (error);
			else if (cache_type == EXT4_EXT_CACHE_IN)
				newblk = lbn - nex.e_blk +
				    (nex.e_start_lo |
				    ((daddr_t)(nex.e_start_hi) << 31) << 1);
		} else {
			ext4_ext_find_extent(fs, ip, lbn, &path);
			depth = ((struct ext4_extent_header *)
			    (ip->i_db))->eh_depth;
			if (path.ep_ext == NULL && depth != 0)
				return (EIO);

			ehp = path.ep_header;
			ep = path.ep_ext;
			if (ep == NULL)
				return (EIO);

			ext4_ext_put_cache(ip, ep, EXT4_EXT_CACHE_IN);

			newblk = lbn - ep->e_blk +
			    (ep->e_start_lo |
			    ((daddr_t)(ep->e_start_hi) << 31) << 1);

			if (path.ep_bp != NULL) {
				brelse(path.ep_bp);
				path.ep_bp = NULL;
			}
		}

		error = bread(ip->i_devvp, fsbtodb(fs, newblk), size,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;
		bqrelse(bp);
	}
	if (bp != NULL)
		bqrelse(bp);
	return (error);
}
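/*
 * Illustrative helper, not in the original source: compute the physical block
 * backing logical block lbn from one ext4 extent record, using the same
 * arithmetic ext4_ext_read() performs inline above.  The 48-bit physical
 * start is assembled from e_start_lo (low 32 bits) and e_start_hi (high 16
 * bits); the shift is split as (<< 31) << 1 exactly as in the code above.
 */
static daddr_t
ext4_extent_pblock(struct ext4_extent *ep, daddr_t lbn)
{
	daddr_t start;

	/* 48-bit starting physical block of the extent */
	start = ep->e_start_lo | ((daddr_t)(ep->e_start_hi) << 31) << 1;

	/* add lbn's offset within the extent to the extent's start block */
	return (lbn - ep->e_blk + start);
}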
/*
 * this function handles traditional block mapping
 */
static int
ext2_ind_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	FS *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid, seqcount;
	u_short mode;

	vp = ap->a_vp;
	ip = VTOI(vp);
	mode = ip->i_mode;
	uio = ap->a_uio;
	seqcount = ap->a_ioflag >> IO_SEQSHIFT;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", READ_S);

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("%s: short symlink", READ_S);
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("%s: type %d", READ_S, vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ext2_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ext2_read: uio->uio_offset < 0"));
	fs = ip->I_FS;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->e2fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = BLKSIZE(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		xfersize = fs->e2fs_fsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size)
			error = bread(vp, lbn, size, NOCRED, &bp);
		else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0)
			error = cluster_read(vp, ip->i_size, lbn, size,
			    NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
		else if (seqcount > 1) {
			int nextsize = BLKSIZE(fs, ip, nextlbn);
			error = breadn(vp, lbn, size,
			    &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else
			error = bread(vp, lbn, size, NOCRED, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;
		bqrelse(bp);
	}
	if (bp != NULL)
		bqrelse(bp);
	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}
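/*
 * Worked example (illustrative, not from the source) of how the read loops
 * above size a single transfer.  With a 4096-byte block size, a 6000-byte
 * file, uio_offset = 5000 and uio_resid = 10000:
 *
 *   lbn         = lblkno(fs, 5000) = 1
 *   blkoffset   = blkoff(fs, 5000) = 904
 *   xfersize    = 4096 - 904       = 3192
 *   bytesinfile = 6000 - 5000      = 1000   (clamps xfersize to 1000)
 *
 * so uiomove() copies only the valid 1000-byte tail of block 1, and the next
 * iteration's bytesinfile test terminates the loop.
 */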