/* Read separately compressed datablock and memcopy into page cache */
int squashfs_readpage_block(struct page *page, u64 block, int bsize)
{
	/*
	 * BUGFIX: struct page has no "inode" member; the owning inode is
	 * reached through the page's address_space (page->mapping->host),
	 * exactly as squashfs_read_cache() does.
	 */
	struct inode *i = page->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
		block, bsize);
	int res = buffer->error;

	if (res)
		ERROR("Unable to read page, block %llx, size %x\n", block,
			bsize);
	else
		/* Copy decompressed data into the page cache page. */
		squashfs_copy_cache(page, buffer, buffer->length, 0);

	/* Release the cache entry reference in both success and error paths. */
	squashfs_cache_put(buffer);
	return res;
}
/*
 * Fallback read path: fetch a compressed datablock through the squashfs
 * cache and copy it into up to @pages page-cache pages, zero-filling the
 * tail of the last partially-covered page.
 *
 * Every non-NULL page in @page[] is marked uptodate and unlocked; pages
 * other than @target_page additionally drop their page-cache reference.
 * Returns 0 on success or the cache entry's error code.
 */
static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page)
{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_cache_entry *entry =
		squashfs_get_datablock(inode->i_sb, block, bsize);
	int remaining = entry->length;
	int err = entry->error;
	int k;

	if (err) {
		ERROR("Unable to read page, block %llx, size %x\n", block,
			bsize);
		goto done;
	}

	/*
	 * Walk the page array; @remaining counts bytes left in the
	 * decompressed block, so the loop also stops once the block is
	 * exhausted even if fewer than @pages pages are covered.
	 */
	for (k = 0; k < pages && remaining > 0;
			k++, remaining -= PAGE_CACHE_SIZE) {
		int avail = min_t(int, remaining, PAGE_CACHE_SIZE);
		void *addr;

		if (page[k] == NULL)
			continue;

		addr = kmap_atomic(page[k]);
		squashfs_copy_data(addr, entry, k * PAGE_CACHE_SIZE, avail);
		/* Zero the remainder of a partially-filled page. */
		memset(addr + avail, 0, PAGE_CACHE_SIZE - avail);
		kunmap_atomic(addr);
		flush_dcache_page(page[k]);
		SetPageUptodate(page[k]);
		unlock_page(page[k]);
		/* The caller keeps its own reference on target_page. */
		if (page[k] != target_page)
			page_cache_release(page[k]);
	}

done:
	squashfs_cache_put(entry);
	return err;
}