static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;

	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	/* Full-page reads decompress directly into the mapped bio page */
	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		goto out_cleanup;
	}

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
		       bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	struct zobj_header *zheader;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].page)) {
		pr_debug("Read before write: sector=%lu, size=%u",
			 (ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		handle_uncompressed_page(zram, bvec, index, offset);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page, KM_USER0);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	ret = lzo1x_decompress_safe(
		cmem + sizeof(*zheader),
		xv_get_object_size(cmem) - sizeof(*zheader),
		uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
		       bvec->bv_len);
		kfree(uncmem);
	}

	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);
	return 0;
}
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_inc_stat(zram, ZRAM_STAT_NUM_READS);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t zlen;
		u32 zoffset;
		struct page *bio_page, *zpage;
		unsigned char *bio_mem, *zmem;

		bio_page = bvec->bv_page;

		if (zram_is_zero_page(zram, index)) {
			handle_zero_page(bio_page);
			continue;
		}

		zram_find_obj(zram, index, &zpage, &zoffset);

		/* Requested page is not present in compressed area */
		if (unlikely(!zpage)) {
			pr_debug("Read before write on swap device: "
				 "sector=%lu, size=%u",
				 (ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(!zoffset)) {
			handle_uncompressed_page(zram, bio_page, index);
			continue;
		}

		bio_mem = kmap_atomic(bio_page, KM_USER0);
		zlen = PAGE_SIZE;

		zmem = kmap_atomic(zpage, KM_USER1) + zoffset;

		ret = lzo1x_decompress_safe(zmem, xv_get_object_size(zmem),
					    bio_mem, &zlen);

		kunmap_atomic(bio_mem, KM_USER0);
		kunmap_atomic(zmem, KM_USER1);

		/* This should NEVER happen - return bio error if it does! */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
			       ret, index);
			goto out;
		}

		flush_dcache_page(bio_page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				 (ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index,
					    ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			index++;
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
			       ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}