/**
 * Allocate an inode number from the group bitmaps.
 *
 * @param fs The file system data.
 * @param goal The preferred allocation position (search hint).
 * @param ino Receives the allocated inode number; written even on failure.
 * @return int The error number (errno). No error if 0.
 */
int
rtems_rfs_inode_alloc (rtems_rfs_file_system* fs,
                       rtems_rfs_bitmap_bit   goal,
                       rtems_rfs_ino*         ino)
{
  rtems_rfs_bitmap_bit allocated = 0;
  const int            result =
    rtems_rfs_group_bitmap_alloc (fs, goal, true, &allocated);
  *ino = allocated;
  return result;
}
/**
 * Allocate an indirect block to a map.
 *
 * @param fs The file system data.
 * @param map The map the allocation is for.
 * @param buffer The buffer the indirect block is accessed by.
 * @param block The block number of the indirect block allocated.
 * @param upping True is upping the map to the next indirect level.
 * @return int The error number (errno). No error if 0.
 */
static int
rtems_rfs_block_map_indirect_alloc (rtems_rfs_file_system*   fs,
                                    rtems_rfs_block_map*     map,
                                    rtems_rfs_buffer_handle* buffer,
                                    rtems_rfs_block_no*      block,
                                    bool                     upping)
{
  rtems_rfs_bitmap_bit indirect;
  int                  rc;

  /*
   * Keep the freshly allocated block number in a local: when upping, *block
   * may alias one of the map slots that are cleared below, so *block is only
   * written once all the slot shuffling is done.
   */
  rc = rtems_rfs_group_bitmap_alloc (fs, map->last_map_block, false, &indirect);
  if (rc > 0)
    return rc;

  rc = rtems_rfs_buffer_handle_request (fs, buffer, indirect, false);
  if (rc > 0)
  {
    /* Roll back the bitmap allocation; the buffer could not be obtained. */
    rtems_rfs_group_bitmap_free (fs, false, indirect);
    return rc;
  }

  /*
   * Fill the new table with 0xff bytes — presumably the "no block" marker
   * for unused slots (NOTE(review): confirm against the slot reader).
   */
  memset (rtems_rfs_buffer_data (buffer), 0xff, rtems_rfs_fs_block_size (fs));

  if (upping)
  {
    int slot;
    if (rtems_rfs_trace (RTEMS_RFS_TRACE_BLOCK_MAP_GROW))
      printf ("rtems-rfs: block-map-grow: upping: block-count=%" PRId32 "\n",
              map->size.count);
    /*
     * Moving up a level of indirection: copy the direct slots into the new
     * table, then clear them so the map only references the new table.
     */
    for (slot = 0; slot < RTEMS_RFS_INODE_BLOCKS; slot++)
      rtems_rfs_block_set_number (buffer, slot, map->blocks[slot]);
    memset (map->blocks, 0, sizeof (map->blocks));
  }

  rtems_rfs_buffer_mark_dirty (buffer);

  *block = indirect;
  map->last_map_block = indirect;

  return 0;
}
/**
 * Grow a block map by a number of data blocks.
 *
 * Blocks are allocated one at a time; the map's buffer handles keep the
 * indirect tables cached so this does not thrash the buffer cache with
 * repeated requests.
 *
 * @param fs The file system data.
 * @param map The map to grow.
 * @param blocks The number of data blocks to append.
 * @param new_block Receives the block number of the FIRST block allocated.
 * @return int The error number (errno). No error if 0. EFBIG when the
 *             request would exceed the maximum map size.
 */
int
rtems_rfs_block_map_grow (rtems_rfs_file_system* fs,
                          rtems_rfs_block_map*   map,
                          size_t                 blocks,
                          rtems_rfs_block_no*    new_block)
{
  size_t b;

  if (rtems_rfs_trace (RTEMS_RFS_TRACE_BLOCK_MAP_GROW))
    /* %zu: blocks is a size_t (%zd is for ssize_t and is UB here). */
    printf ("rtems-rfs: block-map-grow: entry: blocks=%zu count=%" PRIu32 "\n",
            blocks, map->size.count);

  if ((map->size.count + blocks) >= rtems_rfs_fs_max_block_map_blocks (fs))
    return EFBIG;

  /*
   * Allocate a block at a time. The buffer handles hold the blocks so adding
   * this way does not thrash the cache with lots of requests.
   */
  for (b = 0; b < blocks; b++)
  {
    rtems_rfs_bitmap_bit block;
    int                  rc;

    /*
     * Allocate the block. If an indirect block is needed and cannot be
     * allocated free this block.
     */
    rc = rtems_rfs_group_bitmap_alloc (fs, map->last_data_block,
                                       false, &block);
    if (rc > 0)
      return rc;

    if (map->size.count < RTEMS_RFS_INODE_BLOCKS)
      /* Still inside the inode's direct slots: no indirect tables needed. */
      map->blocks[map->size.count] = block;
    else
    {
      /*
       * Single indirect access is occuring. It could still be doubly
       * indirect.
       */
      rtems_rfs_block_no direct;
      rtems_rfs_block_no singly;

      /* Slot within a singly table, and which singly table, respectively. */
      direct = map->size.count % fs->blocks_per_block;
      singly = map->size.count / fs->blocks_per_block;

      if (map->size.count < fs->block_map_singly_blocks)
      {
        /*
         * Singly indirect tables are being used. Allocate a new block for a
         * mapping table if direct is 0 or we are moving up (upping). If
         * upping move the direct blocks into the table and if not this is
         * the first entry of a new block.
         */
        if ((direct == 0) ||
            ((singly == 0) && (direct == RTEMS_RFS_INODE_BLOCKS)))
        {
          /*
           * Upping is when we move from direct to singly indirect.
           */
          bool upping;
          upping = map->size.count == RTEMS_RFS_INODE_BLOCKS;
          rc = rtems_rfs_block_map_indirect_alloc (fs, map,
                                                   &map->singly_buffer,
                                                   &map->blocks[singly],
                                                   upping);
        }
        else
        {
          /* The current singly table has room; just load it. */
          rc = rtems_rfs_buffer_handle_request (fs, &map->singly_buffer,
                                                map->blocks[singly], true);
        }
        if (rc > 0)
        {
          /* Roll back the data block allocation. */
          rtems_rfs_group_bitmap_free (fs, false, block);
          return rc;
        }
      }
      else
      {
        /*
         * Doubly indirect tables are being used.
         */
        rtems_rfs_block_no doubly;
        rtems_rfs_block_no singly_block;

        /* Re-base: which doubly slot, and which singly slot within it. */
        doubly  = singly / fs->blocks_per_block;
        singly %= fs->blocks_per_block;

        /*
         * Allocate a new block for a singly indirect table if direct is 0 as
         * it is the first entry of a new block. We may also need to allocate
         * a doubly indirect block as well. Both always occur when direct is
         * 0 and the doubly indirect block when singly is 0.
         */
        if (direct == 0)
        {
          rc = rtems_rfs_block_map_indirect_alloc (fs, map,
                                                   &map->singly_buffer,
                                                   &singly_block,
                                                   false);
          if (rc > 0)
          {
            rtems_rfs_group_bitmap_free (fs, false, block);
            return rc;
          }

          /*
           * Allocate a new block for a doubly indirect table if singly is 0
           * as it is the first entry of a new singly indirect block.
           */
          if ((singly == 0) ||
              ((doubly == 0) && (singly == RTEMS_RFS_INODE_BLOCKS)))
          {
            bool upping;
            upping = map->size.count == fs->block_map_singly_blocks;
            rc = rtems_rfs_block_map_indirect_alloc (fs, map,
                                                     &map->doubly_buffer,
                                                     &map->blocks[doubly],
                                                     upping);
            if (rc > 0)
            {
              /* Roll back both allocations made so far. */
              rtems_rfs_group_bitmap_free (fs, false, singly_block);
              rtems_rfs_group_bitmap_free (fs, false, block);
              return rc;
            }
          }
          else
          {
            rc = rtems_rfs_buffer_handle_request (fs, &map->doubly_buffer,
                                                  map->blocks[doubly], true);
            if (rc > 0)
            {
              rtems_rfs_group_bitmap_free (fs, false, singly_block);
              rtems_rfs_group_bitmap_free (fs, false, block);
              return rc;
            }
          }

          /* Publish the new singly table in the doubly table. */
          rtems_rfs_block_set_number (&map->doubly_buffer,
                                      singly, singly_block);
        }
        else
        {
          /* Existing tables: load doubly, look up singly, then load it. */
          rc = rtems_rfs_buffer_handle_request (fs, &map->doubly_buffer,
                                                map->blocks[doubly], true);
          if (rc > 0)
          {
            rtems_rfs_group_bitmap_free (fs, false, block);
            return rc;
          }

          singly_block = rtems_rfs_block_get_number (&map->doubly_buffer,
                                                     singly);

          rc = rtems_rfs_buffer_handle_request (fs, &map->singly_buffer,
                                                singly_block, true);
          if (rc > 0)
          {
            rtems_rfs_group_bitmap_free (fs, false, block);
            return rc;
          }
        }
      }

      /* Record the data block in its singly-indirect slot. */
      rtems_rfs_block_set_number (&map->singly_buffer, direct, block);
    }

    map->size.count++;
    map->size.offset = 0;

    /* Report only the first block allocated back to the caller. */
    if (b == 0)
      *new_block = block;
    map->last_data_block = block;
    map->dirty = true;
  }

  return 0;
}