/*
 * Read a filesystem table (uncompressed sequence of bytes) from disk
 */
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	int i, res;
	void *table, *buffer, **data;

	table = buffer = kmalloc(length, GFP_KERNEL);
	if (table == NULL)
		return ERR_PTR(-ENOMEM);

	data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (data == NULL) {
		res = -ENOMEM;
		goto failed;
	}

	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
		data[i] = buffer;

	res = squashfs_read_data(sb, data, block, length |
		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);

	kfree(data);

	if (res < 0)
		goto failed;

	return table;

failed:
	kfree(table);
	return ERR_PTR(res);
}
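/*
 * Usage sketch (illustrative, not from the original source): table loaders
 * are assumed to wrap squashfs_read_table(), computing the on-disk byte
 * length from a count macro and passing the ERR_PTR straight through.
 * The name and the SQUASHFS_ID_BLOCK_BYTES() macro here are assumptions.
 */
static __le64 *example_read_id_index_table(struct super_block *sb,
	u64 id_table_start, unsigned short no_ids)
{
	/* byte length of the on-disk index for no_ids entries */
	unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);

	/* returns the kmalloc'd table on success, or an ERR_PTR */
	return squashfs_read_table(sb, id_table_start, length);
}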
void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	void *strm, *buffer = NULL;
	int length = 0;

	/*
	 * Read decompressor specific options from file system if present
	 */
	if (SQUASHFS_COMP_OPTS(flags)) {
		buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (buffer == NULL)
			return ERR_PTR(-ENOMEM);

		length = squashfs_read_data(sb, &buffer,
			sizeof(struct squashfs_super_block), 0, NULL,
			PAGE_CACHE_SIZE, 1);

		if (length < 0) {
			strm = ERR_PTR(length);
			goto finished;
		}
	}

	strm = msblk->decompressor->init(msblk, buffer, length);

finished:
	kfree(buffer);
	return strm;
}
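/*
 * Sketch of the ->init() hook invoked above (illustrative, loosely
 * modelled on the in-kernel zlib wrapper; the function name is an
 * assumption). The hook allocates per-mount stream state; the buff/len
 * pair carries any decompressor options read from disk, unused here.
 */
static void *example_comp_init(struct squashfs_sb_info *msblk, void *buff,
	int len)
{
	z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);

	if (stream == NULL)
		return ERR_PTR(-ENOMEM);

	stream->workspace = vmalloc(zlib_inflate_workspacesize());
	if (stream->workspace == NULL) {
		kfree(stream);
		return ERR_PTR(-ENOMEM);
	}

	return stream;
}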
/*
 * Read a filesystem table (uncompressed sequence of bytes) from disk
 * into a caller-supplied buffer (earlier, non-allocating variant of the
 * function above)
 */
int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
	int length)
{
	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	int i, res;
	void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);

	if (data == NULL)
		return -ENOMEM;

	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
		data[i] = buffer;

	res = squashfs_read_data(sb, data, block, length |
		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);

	kfree(data);

	return res;
}
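/*
 * Hypothetical usage sketch (not from the original source): with the
 * caller-allocated variant above, a wrapper allocates the table buffer
 * itself and converts a negative return code into an ERR_PTR.
 */
static void *example_read_table_alloc(struct super_block *sb, u64 block,
	int length)
{
	void *table = kmalloc(length, GFP_KERNEL);
	int res;

	if (table == NULL)
		return ERR_PTR(-ENOMEM);

	res = squashfs_read_table(sb, table, block, length);
	if (res < 0) {
		kfree(table);
		return ERR_PTR(res);
	}

	return table;
}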
static int read_fragment_index_table_2(struct super_block *s)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;

	/* Allocate the in-memory copy of the fragment index table */
	msblk->fragment_index_2 = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES_2
		(sblk->fragments), GFP_KERNEL);
	if (msblk->fragment_index_2 == NULL) {
		ERROR("Failed to allocate fragment index table\n");
		return 0;
	}

	/* Read the index from disk (stored as an uncompressed block) */
	if (SQUASHFS_FRAGMENT_INDEX_BYTES_2(sblk->fragments) &&
			!squashfs_read_data(s, (char *) msblk->fragment_index_2,
			sblk->fragment_table_start,
			SQUASHFS_FRAGMENT_INDEX_BYTES_2(sblk->fragments) |
			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL,
			SQUASHFS_FRAGMENT_INDEX_BYTES_2(sblk->fragments))) {
		ERROR("unable to read fragment index table\n");
		return 0;
	}

	/*
	 * Byte-swap each index entry if the filesystem was created on a
	 * machine of the opposite endianness.
	 */
	if (msblk->swap) {
		int i;
		unsigned int fragment;

		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES_2(sblk->fragments);
				i++) {
			SQUASHFS_SWAP_FRAGMENT_INDEXES_2((&fragment),
				&msblk->fragment_index_2[i], 1);
			msblk->fragment_index_2[i] = fragment;
		}
	}

	return 1;
}
/*
 * Look-up block in cache, and increment usage count.  If not in cache, read
 * and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
	struct squashfs_cache *cache, u64 block, int length)
{
	int i, n;
	struct squashfs_cache_entry *entry;

	spin_lock(&cache->lock);

	while (1) {
		for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
			if (cache->entry[i].block == block) {
				cache->curr_blk = i;
				break;
			}
			i = (i + 1) % cache->entries;
		}

		if (n == cache->entries) {
			/*
			 * Block not in cache, if all cache entries are used
			 * go to sleep waiting for one to become available.
			 */
			if (cache->unused == 0) {
				cache->num_waiters++;
				spin_unlock(&cache->lock);
				wait_event(cache->wait_queue, cache->unused);
				spin_lock(&cache->lock);
				cache->num_waiters--;
				continue;
			}

			/*
			 * At least one unused cache entry.  A simple
			 * round-robin strategy is used to choose the entry to
			 * be evicted from the cache.
			 */
			i = cache->next_blk;
			for (n = 0; n < cache->entries; n++) {
				if (cache->entry[i].refcount == 0)
					break;
				i = (i + 1) % cache->entries;
			}

			cache->next_blk = (i + 1) % cache->entries;
			entry = &cache->entry[i];

			/*
			 * Initialise chosen cache entry, and fill it in from
			 * disk.
			 */
			cache->unused--;
			entry->block = block;
			entry->refcount = 1;
			entry->pending = 1;
			entry->num_waiters = 0;
			entry->error = 0;
			spin_unlock(&cache->lock);

			entry->length = squashfs_read_data(sb, entry->data,
				block, length, &entry->next_index,
				cache->block_size, cache->pages);

			spin_lock(&cache->lock);

			if (entry->length < 0)
				entry->error = entry->length;

			entry->pending = 0;

			/*
			 * While filling this entry one or more other processes
			 * have looked it up in the cache, and have slept
			 * waiting for it to become available.
			 */
			if (entry->num_waiters) {
				spin_unlock(&cache->lock);
				wake_up_all(&entry->wait_queue);
			} else
				spin_unlock(&cache->lock);

			goto out;
		}

		/*
		 * Block already in cache.  Increment refcount so it doesn't
		 * get reused until we're finished with it, if it was
		 * previously unused there's one less cache entry available
		 * for reuse.
		 */
		entry = &cache->entry[i];
		if (entry->refcount == 0)
			cache->unused--;
		entry->refcount++;

		/*
		 * If the entry is currently being filled in by another process
		 * go to sleep waiting for it to become available.
		 */
		if (entry->pending) {
			entry->num_waiters++;
			spin_unlock(&cache->lock);
			wait_event(entry->wait_queue, !entry->pending);
		} else
			spin_unlock(&cache->lock);

		goto out;
	}

out:
	TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
		cache->name, i, entry->block, entry->refcount, entry->error);

	if (entry->error)
		ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
			block);
	return entry;
}
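/*
 * Matching release path (a sketch reconstructed from the refcount/unused
 * fields used above; assumes each entry carries a back-pointer to its
 * owning cache, and may differ from the actual source). Dropping the
 * last reference returns the entry to the unused pool and wakes one
 * process sleeping in squashfs_cache_get() waiting for a free entry.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
	struct squashfs_cache *cache = entry->cache;

	spin_lock(&cache->lock);
	entry->refcount--;
	if (entry->refcount == 0) {
		cache->unused++;
		/*
		 * If there's any processes waiting for a block to become
		 * available, wake one up.
		 */
		if (cache->num_waiters) {
			spin_unlock(&cache->lock);
			wake_up(&cache->wait_queue);
			return;
		}
	}
	spin_unlock(&cache->lock);
}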
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

	int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;

	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;

	page = kmalloc(sizeof(void *) * pages, GFP_KERNEL);
	if (page == NULL)
		return res;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(page, pages, 0);
	if (actor == NULL)
		goto out;

	/* Try to grab all the pages covered by the Squashfs block */
	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

		if (page[i] == NULL) {
			missing_pages++;
			continue;
		}

		if (PageUptodate(page[i])) {
			unlock_page(page[i]);
			page_cache_release(page[i]);
			page[i] = NULL;
			missing_pages++;
		}
	}

	if (missing_pages) {
		/*
		 * Couldn't get one or more pages, this page has either
		 * been VM reclaimed, but others are still in the page cache
		 * and uptodate, or we're racing with another thread in
		 * squashfs_readpage also trying to grab them.  Fall back to
		 * using an intermediate buffer.
		 */
		res = squashfs_read_cache(target_page, block, bsize, pages,
			page);
		if (res < 0)
			goto mark_errored;

		goto out;
	}

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
	if (res < 0)
		goto mark_errored;

	/* Last page may have trailing bytes not filled */
	bytes = res % PAGE_CACHE_SIZE;
	if (bytes) {
		pageaddr = kmap_atomic(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
		kunmap_atomic(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			page_cache_release(page[i]);
	}

	kfree(actor);
	kfree(page);

	return 0;

mark_errored:
	/*
	 * Decompression failed, mark pages as errored.  Target_page is
	 * dealt with by the caller
	 */
	for (i = 0; i < pages; i++) {
		if (page[i] == NULL || page[i] == target_page)
			continue;
		flush_dcache_page(page[i]);
		SetPageError(page[i]);
		unlock_page(page[i]);
		page_cache_release(page[i]);
	}

out:
	kfree(actor);
	kfree(page);
	return res;
}
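/*
 * Sketch of the intermediate-buffer fallback called above (illustrative;
 * squashfs_get_datablock() and squashfs_copy_data() are assumed helpers
 * of the cache layer, and the body may differ from the actual source).
 * The block is decompressed into a cache entry, then copied page by page
 * into whichever page cache pages were successfully grabbed.
 */
static int example_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page)
{
	struct inode *i = target_page->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
		block, bsize);
	int bytes = buffer->length, res = buffer->error, n, offset = 0;
	void *pageaddr;

	if (res) {
		ERROR("Unable to read page, block %llx, size %x\n", block,
			bsize);
		goto out;
	}

	/* Copy decompressed data into each grabbed page; skip NULL slots */
	for (n = 0; n < pages && bytes > 0; n++,
			bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
		int avail = min_t(int, bytes, PAGE_CACHE_SIZE);

		if (page[n] == NULL)
			continue;

		pageaddr = kmap_atomic(page[n]);
		squashfs_copy_data(pageaddr, buffer, offset, avail);
		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
		kunmap_atomic(pageaddr);
		flush_dcache_page(page[n]);
		SetPageUptodate(page[n]);
		unlock_page(page[n]);
		if (page[n] != target_page)
			page_cache_release(page[n]);
	}

out:
	/* Release the cache entry reference (see squashfs_cache_put above) */
	squashfs_cache_put(buffer);
	return res;
}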