/*
 * Issue readahead for one directory data block in the data fork.
 *
 * @dp:         directory inode the block belongs to
 * @bno:        directory-logical block number to read ahead
 * @mapped_bno: known disk address, or -1 if the caller has no mapping
 *
 * Thin pass-through to xfs_da_reada_buf() with the dir3 data block
 * verifier ops attached.
 */
int
xfs_dir3_data_readahead(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mapped_bno)
{
	return xfs_da_reada_buf(dp, bno, mapped_bno, XFS_DATA_FORK,
				&xfs_dir3_data_reada_buf_ops);
}
/*
 * Open a directory file.
 *
 * Runs the common xfs_file_open() checks first; on success, takes the
 * inode lock shared just long enough to submit readahead for directory
 * block 0, since a readdir is the overwhelmingly likely next operation.
 * Always returns 0 once xfs_file_open() has succeeded.
 */
STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}
STATIC int xfs_dir_open( struct inode *inode, struct file *file) { struct xfs_inode *ip = XFS_I(inode); int mode; int error; error = xfs_file_open(inode, file); if (error) return error; /* * If there are any blocks, read-ahead block 0 as we're almost * certain to have the next operation be a read there. */ mode = xfs_ilock_map_shared(ip); if (ip->i_d.di_nextents > 0) xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); xfs_iunlock(ip, mode); return 0; }
/*
 * Getdents (readdir) for a node-format (Btree) directory.
 *
 * Resumes from the block/hash "cookie" packed into uio->uio_offset when
 * that still points at a usable leaf; otherwise re-walks from the Btree
 * root to re-find the hash.  Leaf entries are then streamed into the
 * caller's buffer via the @put callback, following the leaf forward
 * links, until the buffer fills (eob) or the chain ends.
 *
 * @trans: transaction context for buffer reads (may be hedged by caller)
 * @dp:    directory inode
 * @uio:   user I/O descriptor; uio_offset carries the resume cookie
 * @eofp:  out: set to 1 at end-of-directory, 0 when the buffer filled
 * @dbp:   dirent staging buffer handed to the leaf-level helper
 * @put:   formatting/copy-out callback for each entry
 */
STATIC int
xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
		      int *eofp, dirent_t *dbp, xfs_dir_put_t put)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dir_leafblock_t *leaf;
	xfs_dablk_t bno, nextbno;
	xfs_dahash_t cookhash;
	xfs_mount_t *mp;
	int error, eob, i;
	xfs_dabuf_t *bp;
	xfs_daddr_t nextda;

	/*
	 * Pick up our context.
	 */
	mp = dp->i_mount;
	bp = NULL;
	bno = XFS_DA_COOKIE_BNO(mp, uio->uio_offset);
	cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset);

	xfs_dir_trace_g_du("node: start", dp, uio);

	/*
	 * Re-find our place, even if we're confused about what our place is.
	 *
	 * First we check the block number from the magic cookie, it is a
	 * cache of where we ended last time.  If we find a leaf block, and
	 * the starting hashval in that block is less than our desired
	 * hashval, then we run with it.
	 */
	if (bno > 0) {
		error = xfs_da_read_buf(trans, dp, bno, -1, &bp, XFS_DATA_FORK);
		/*
		 * EFSCORRUPTED is deliberately tolerated here: a bad cached
		 * block just forces the root re-walk below instead of
		 * failing the whole readdir.
		 */
		if ((error != 0) && (error != EFSCORRUPTED))
			return(error);
		if (bp)
			leaf = bp->data;
		/* Cookie block isn't a leaf any more: discard and re-walk. */
		if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) {
			xfs_dir_trace_g_dub("node: block not a leaf",
					    dp, uio, bno);
			xfs_da_brelse(trans, bp);
			bp = NULL;
		}
		/* Leaf starts past our hash: the cookie is stale. */
		if (bp && INT_GET(leaf->entries[0].hashval, ARCH_CONVERT) > cookhash) {
			xfs_dir_trace_g_dub("node: leaf hash too large",
					    dp, uio, bno);
			xfs_da_brelse(trans, bp);
			bp = NULL;
		}
		/* Leaf ends before our hash: also stale. */
		if (bp &&
		    cookhash > INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT)) {
			xfs_dir_trace_g_dub("node: leaf hash too small",
					    dp, uio, bno);
			xfs_da_brelse(trans, bp);
			bp = NULL;
		}
	}

	/*
	 * If we did not find a leaf block from the blockno in the cookie,
	 * or we there was no blockno in the cookie (eg: first time thru),
	 * the we start at the top of the Btree and re-find our hashval.
	 */
	if (bp == NULL) {
		xfs_dir_trace_g_du("node: start at root" , dp, uio);
		bno = 0;
		for (;;) {
			error = xfs_da_read_buf(trans, dp, bno, -1, &bp,
						XFS_DATA_FORK);
			if (error)
				return(error);
			if (bp == NULL)
				return(XFS_ERROR(EFSCORRUPTED));
			node = bp->data;
			/* Reached a non-node block: fall out holding bp. */
			if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)
				break;
			btree = &node->btree[0];
			xfs_dir_trace_g_dun("node: node detail", dp, uio, node);
			/* Descend into the first child covering cookhash. */
			for (i = 0; i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) {
				if (INT_GET(btree->hashval, ARCH_CONVERT) >= cookhash) {
					bno = INT_GET(btree->before, ARCH_CONVERT);
					break;
				}
			}
			/* Hash is beyond every entry: report EOF. */
			if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) {
				xfs_da_brelse(trans, bp);
				xfs_dir_trace_g_du("node: hash beyond EOF",
						   dp, uio);
				uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0,
							XFS_DA_MAXHASH);
				*eofp = 1;
				return(0);
			}
			xfs_dir_trace_g_dub("node: going to block",
					    dp, uio, bno);
			xfs_da_brelse(trans, bp);
		}
	}
	ASSERT(cookhash != XFS_DA_MAXHASH);

	/*
	 * We've dropped down to the (first) leaf block that contains the
	 * hashval we are interested in.  Continue rolling upward thru the
	 * leaf blocks until we fill up our buffer.
	 */
	for (;;) {
		leaf = bp->data;
		if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) {
			xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf);
			xfs_da_brelse(trans, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf);
		/*
		 * Kick off readahead on the next sibling leaf, if any.
		 * (The assignment inside the condition is intentional.)
		 */
		if (nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) {
			nextda = xfs_da_reada_buf(trans, dp, nextbno,
						  XFS_DATA_FORK);
		} else
			nextda = -1;
		error = xfs_dir_leaf_getdents_int(bp, dp, bno, uio, &eob, dbp,
						  put, nextda);
		xfs_da_brelse(trans, bp);
		bno = nextbno;
		/* Caller's buffer filled up: stop, not at EOF. */
		if (eob) {
			xfs_dir_trace_g_dub("node: E-O-B", dp, uio, bno);
			*eofp = 0;
			return(error);
		}
		/* No forward sibling: end of the leaf chain. */
		if (bno == 0)
			break;
		error = xfs_da_read_buf(trans, dp, bno, nextda, &bp,
					XFS_DATA_FORK);
		if (error)
			return(error);
		if (bp == NULL)
			return(XFS_ERROR(EFSCORRUPTED));
	}
	*eofp = 1;
	xfs_dir_trace_g_du("node: E-O-F", dp, uio);
	return(0);
}
/*
 * Getdents (readdir) for leaf and node directories.
 * This reads the data blocks only, so is the same for both forms.
 *
 * @dp:      incore directory inode to read
 * @dirent:  opaque cookie passed straight through to @filldir
 * @bufsize: caller's buffer size; used to size the bmap/readahead window
 * @offset:  in/out directory position cookie in dataptr form
 * @filldir: per-entry callback; a nonzero return stops the walk
 *
 * Returns 0, or the error from the mapping/read path (note the XXX
 * break-on-error cases below, which drop the error into a clean stop).
 */
int						/* error */
xfs_dir2_leaf_getdents(
	xfs_inode_t		*dp,		/* incore directory inode */
	void			*dirent,
	size_t			bufsize,
	xfs_off_t		*offset,
	filldir_t		filldir)
{
	xfs_dabuf_t		*bp;		/* data block buffer */
	int			byteoff;	/* offset in current block */
	xfs_dir2_db_t		curdb;		/* db for current block */
	xfs_dir2_off_t		curoff;		/* current overall offset */
	xfs_dir2_data_t		*data;		/* data block structure */
	xfs_dir2_data_entry_t	*dep;		/* data entry */
	xfs_dir2_data_unused_t	*dup;		/* unused entry */
	int			error = 0;	/* error return value */
	int			i;		/* temporary loop index */
	int			j;		/* temporary loop index */
	int			length;		/* temporary length value */
	xfs_bmbt_irec_t		*map;		/* map vector for blocks */
	xfs_extlen_t		map_blocks;	/* number of fsbs in map */
	xfs_dablk_t		map_off;	/* last mapped file offset */
	int			map_size;	/* total entries in *map */
	int			map_valid;	/* valid entries in *map */
	xfs_mount_t		*mp;		/* filesystem mount point */
	xfs_dir2_off_t		newoff;		/* new curoff after new blk */
	int			nmap;		/* mappings to ask xfs_bmapi */
	char			*ptr = NULL;	/* pointer to current data */
	int			ra_current;	/* number of read-ahead blks */
	int			ra_index;	/* *map index for read-ahead */
	int			ra_offset;	/* map entry offset for ra */
	int			ra_want;	/* readahead count wanted */

	/*
	 * If the offset is at or past the largest allowed value,
	 * give up right away.
	 */
	if (*offset >= XFS_DIR2_MAX_DATAPTR)
		return 0;
	mp = dp->i_mount;
	/*
	 * Set up to bmap a number of blocks based on the caller's
	 * buffer size, the directory block size, and the filesystem
	 * block size.
	 */
	map_size = howmany(bufsize + mp->m_dirblksize, mp->m_sb.sb_blocksize);
	map = kmem_alloc(map_size * sizeof(*map), KM_SLEEP);
	map_valid = ra_index = ra_offset = ra_current = map_blocks = 0;
	bp = NULL;
	/*
	 * Inside the loop we keep the main offset value as a byte offset
	 * in the directory file.
	 */
	curoff = xfs_dir2_dataptr_to_byte(mp, *offset);
	/*
	 * Force this conversion through db so we truncate the offset
	 * down to get the start of the data block.
	 */
	map_off = xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, curoff));
	/*
	 * Loop over directory entries until we reach the end offset.
	 * Get more blocks and readahead as necessary.
	 */
	while (curoff < XFS_DIR2_LEAF_OFFSET) {
		/*
		 * If we have no buffer, or we're off the end of the
		 * current buffer, need to get another one.
		 */
		if (!bp || ptr >= (char *)bp->data + mp->m_dirblksize) {
			/*
			 * If we have a buffer, we need to release it and
			 * take it out of the mapping.
			 */
			if (bp) {
				xfs_da_brelse(NULL, bp);
				bp = NULL;
				map_blocks -= mp->m_dirblkfsbs;
				/*
				 * Loop to get rid of the extents for the
				 * directory block.
				 */
				for (i = mp->m_dirblkfsbs; i > 0; ) {
					j = MIN((int)map->br_blockcount, i);
					map->br_blockcount -= j;
					map->br_startblock += j;
					map->br_startoff += j;
					/*
					 * If mapping is done, pitch it from
					 * the table.
					 */
					if (!map->br_blockcount && --map_valid)
						memmove(&map[0], &map[1],
							sizeof(map[0]) *
							map_valid);
					i -= j;
				}
			}
			/*
			 * Recalculate the readahead blocks wanted.
			 */
			ra_want = howmany(bufsize + mp->m_dirblksize,
					  mp->m_sb.sb_blocksize) - 1;
			ASSERT(ra_want >= 0);
			/*
			 * If we don't have as many as we want, and we haven't
			 * run out of data blocks, get some more mappings.
			 */
			if (1 + ra_want > map_blocks &&
			    map_off <
			    xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET)) {
				/*
				 * Get more bmaps, fill in after the ones
				 * we already have in the table.
				 */
				nmap = map_size - map_valid;
				error = xfs_bmapi(NULL, dp, map_off,
					xfs_dir2_byte_to_da(mp,
						XFS_DIR2_LEAF_OFFSET) - map_off,
					XFS_BMAPI_METADATA, NULL, 0,
					&map[map_valid], &nmap, NULL, NULL);
				/*
				 * Don't know if we should ignore this or
				 * try to return an error.
				 * The trouble with returning errors
				 * is that readdir will just stop without
				 * actually passing the error through.
				 */
				if (error)
					break;	/* XXX */
				/*
				 * If we got all the mappings we asked for,
				 * set the final map offset based on the
				 * last bmap value received.
				 * Otherwise, we've reached the end.
				 */
				if (nmap == map_size - map_valid)
					map_off =
					map[map_valid + nmap - 1].br_startoff +
					map[map_valid + nmap - 1].br_blockcount;
				else
					map_off =
						xfs_dir2_byte_to_da(mp,
							XFS_DIR2_LEAF_OFFSET);
				/*
				 * Look for holes in the mapping, and
				 * eliminate them.  Count up the valid blocks.
				 */
				for (i = map_valid; i < map_valid + nmap; ) {
					if (map[i].br_startblock ==
					    HOLESTARTBLOCK) {
						nmap--;
						length = map_valid + nmap - i;
						if (length)
							memmove(&map[i],
								&map[i + 1],
								sizeof(map[i]) *
								length);
					} else {
						map_blocks +=
							map[i].br_blockcount;
						i++;
					}
				}
				map_valid += nmap;
			}
			/*
			 * No valid mappings, so no more data blocks.
			 */
			if (!map_valid) {
				curoff = xfs_dir2_da_to_byte(mp, map_off);
				break;
			}
			/*
			 * Read the directory block starting at the first
			 * mapping.
			 */
			curdb = xfs_dir2_da_to_db(mp, map->br_startoff);
			error = xfs_da_read_buf(NULL, dp, map->br_startoff,
				map->br_blockcount >= mp->m_dirblkfsbs ?
				    XFS_FSB_TO_DADDR(mp, map->br_startblock) :
				    -1,
				&bp, XFS_DATA_FORK);
			/*
			 * Should just skip over the data block instead
			 * of giving up.
			 */
			if (error)
				break;	/* XXX */
			/*
			 * Adjust the current amount of read-ahead: we just
			 * read a block that was previously ra.
			 */
			if (ra_current)
				ra_current -= mp->m_dirblkfsbs;
			/*
			 * Do we need more readahead?
			 */
			for (ra_index = ra_offset = i = 0;
			     ra_want > ra_current && i < map_blocks;
			     i += mp->m_dirblkfsbs) {
				ASSERT(ra_index < map_valid);
				/*
				 * Read-ahead a contiguous directory block.
				 *
				 * NOTE(review): both candidate readahead
				 * calls below are commented out, so this
				 * branch is currently a no-op that still
				 * advances ra_current — which also keeps
				 * the non-contiguous branch from firing
				 * for this block.  Confirm that disabling
				 * contiguous readahead here is intended.
				 */
				if (i > ra_current &&
				    map[ra_index].br_blockcount >=
				    mp->m_dirblkfsbs) {
					//TODO: make sure this is right, should readahead be
					//replaced by readbuf?
					//xfs_buf_readahead(mp->m_ddev_targp,
					//	XFS_FSB_TO_DADDR(mp,
					//	   map[ra_index].br_startblock +
					//	   ra_offset),
					//	(int)BTOBB(mp->m_dirblksize));
					//TODO: figure out flags, flags =0
					//libxfs_readbuf(mp->m_dev,
					//	XFS_FSB_TO_DADDR(mp,
					//	   map[ra_index].br_startblock +
					//	   ra_offset),
					//	(int)BTOBB(mp->m_dirblksize), 0);
					ra_current = i;
				}
				/*
				 * Read-ahead a non-contiguous directory block.
				 * This doesn't use our mapping, but this
				 * is a very rare case.
				 */
				else if (i > ra_current) {
					(void)xfs_da_reada_buf(NULL, dp,
						map[ra_index].br_startoff +
						ra_offset, XFS_DATA_FORK);
					ra_current = i;
				}
				/*
				 * Advance offset through the mapping table.
				 *
				 * NOTE(review): j advances by length in the
				 * body AND by 1 in the loop header each
				 * pass — TODO confirm the extra j++ is
				 * intended and not an off-by-one.
				 */
				for (j = 0; j < mp->m_dirblkfsbs; j++) {
					/*
					 * The rest of this extent but not
					 * more than a dir block.
					 */
					length = MIN(mp->m_dirblkfsbs,
						(int)(map[ra_index].br_blockcount -
						ra_offset));
					j += length;
					ra_offset += length;
					/*
					 * Advance to the next mapping if
					 * this one is used up.
					 */
					if (ra_offset ==
					    map[ra_index].br_blockcount) {
						ra_offset = 0;
						ra_index++;
					}
				}
			}
			/*
			 * Having done a read, we need to set a new offset.
			 */
			newoff = xfs_dir2_db_off_to_byte(mp, curdb, 0);
			/*
			 * Start of the current block.
			 */
			if (curoff < newoff)
				curoff = newoff;
			/*
			 * Make sure we're in the right block.
			 */
			else if (curoff > newoff)
				ASSERT(xfs_dir2_byte_to_db(mp, curoff) ==
				       curdb);
			data = bp->data;
			xfs_dir2_data_check(dp, bp);
			/*
			 * Find our position in the block.
			 */
			ptr = (char *)&data->u;
			byteoff = xfs_dir2_byte_to_off(mp, curoff);
			/*
			 * Skip past the header.
			 */
			if (byteoff == 0)
				curoff += (uint)sizeof(data->hdr);
			/*
			 * Skip past entries until we reach our offset.
			 */
			else {
				while ((char *)ptr - (char *)data < byteoff) {
					dup = (xfs_dir2_data_unused_t *)ptr;
					if (be16_to_cpu(dup->freetag)
						  == XFS_DIR2_DATA_FREE_TAG) {
						length = be16_to_cpu(dup->length);
						ptr += length;
						continue;
					}
					dep = (xfs_dir2_data_entry_t *)ptr;
					length =
					   xfs_dir2_data_entsize(dep->namelen);
					ptr += length;
				}
				/*
				 * Now set our real offset.
				 */
				curoff =
					xfs_dir2_db_off_to_byte(mp,
					    xfs_dir2_byte_to_db(mp, curoff),
					    (char *)ptr - (char *)data);
				/* Walked off the block end: fetch the next. */
				if (ptr >= (char *)data + mp->m_dirblksize) {
					continue;
				}
			}
		}
		/*
		 * We have a pointer to an entry.
		 * Is it a live one?
		 */
		dup = (xfs_dir2_data_unused_t *)ptr;
		/*
		 * No, it's unused, skip over it.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			length = be16_to_cpu(dup->length);
			ptr += length;
			curoff += length;
			continue;
		}
		dep = (xfs_dir2_data_entry_t *)ptr;
		length = xfs_dir2_data_entsize(dep->namelen);
		/* Hand the live entry to the caller; nonzero means stop. */
		if (filldir(dirent, (char *)dep->name, dep->namelen,
			    xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
			    be64_to_cpu(dep->inumber), DT_UNKNOWN))
			break;
		/*
		 * Advance to next entry in the block.
		 */
		ptr += length;
		curoff += length;
		/* bufsize may have just been a guess; don't go negative */
		bufsize = bufsize > length ? bufsize - length : 0;
	}
	/*
	 * All done.  Set output offset value to current offset.
	 */
	if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR))
		*offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
	else
		*offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
	kmem_free(map);
	if (bp)
		xfs_da_brelse(NULL, bp);
	return error;
}