static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}
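
The flag accessors used above (zram_test_flag() and friends) sit outside this excerpt. In the staging-era driver they are one-line bit operations on the table entry's flags field; a minimal sketch, assuming the usual enum zram_pageflags bit indices:

/*
 * Sketch of the flag helpers called above, assuming a per-entry
 * flags word in the table as in the staging-era driver.
 */
static int zram_test_flag(struct zram *zram, u32 index,
			  enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			  enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			    enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}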
Example #2
static void zram_free_page(struct zram *zram, size_t index)
{
	int zlen;
	void *obj;
	u32 offset;
	struct page *page;

	/*
	 * No memory is allocated for zero filled pages.
	 * Simply clear corresponding table entry.
	 */
	if (zram_is_zero_page(zram, index)) {
		zram_clear_zero_page(zram, index);
		zram_dec_stat(zram, ZRAM_STAT_PAGES_ZERO);
		return;
	}

	zram_find_obj(zram, index, &page, &offset);
	if (!page)
		return;

	/* Uncompressed pages consume the whole page, so offset is zero */
	if (unlikely(!offset)) {
		zlen = PAGE_SIZE;
		__free_page(page);
		zram_dec_stat(zram, ZRAM_STAT_PAGES_EXPAND);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	zlen = xv_get_object_size(obj);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);

out:
	zram_add_stat(zram, ZRAM_STAT_COMPR_SIZE, -zlen);
	zram_dec_stat(zram, ZRAM_STAT_PAGES_STORED);

	zram->table[index].addr = 0;
}
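
This variant keeps a single addr per table entry (cleared at the end of the function) and recovers the page/offset pair through zram_find_obj(), which is not part of the excerpt. A minimal sketch, assuming addr holds the object's kernel virtual address so the in-page offset lives in the low bits; note that a zero offset is exactly what the caller above uses to detect an uncompressed, page-aligned allocation:

/*
 * Hypothetical decoding of table[index].addr; the exact encoding is
 * not shown in the excerpt, so treat this as an assumption.
 */
static void zram_find_obj(struct zram *zram, u32 index,
			  struct page **page, u32 *offset)
{
	unsigned long addr = zram->table[index].addr;

	if (!addr) {
		/* No object stored for this index */
		*page = NULL;
		*offset = 0;
		return;
	}

	*page = virt_to_page((void *)addr);
	*offset = addr & ~PAGE_MASK;
}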
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
	int ret;
	size_t clen = PAGE_SIZE;
	struct zobj_header *zheader;
	unsigned char *cmem;

	if (zram_test_flag(zram, index, ZRAM_ZERO) ||
	    !zram->table[index].page) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
		zram->table[index].offset;

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		memcpy(mem, cmem, PAGE_SIZE);
		kunmap_atomic(cmem, KM_USER0);
		return 0;
	}

	ret = DECOMPRESS(cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			mem, &clen);
	kunmap_atomic(cmem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}
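
DECOMPRESS() is not defined anywhere in this excerpt. At this call site the mainline staging driver invokes lzo1x_decompress_safe() directly, so a thin wrapper macro along these lines is assumed; it returns LZO_E_OK (0) on success, which is why the callers can test plain ret:

#include <linux/lzo.h>

/* Assumed wrapper; mainline calls lzo1x_decompress_safe() directly. */
#define DECOMPRESS(src, src_len, dst, dst_len) \
	lzo1x_decompress_safe((src), (src_len), (dst), (dst_len))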
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	struct zobj_header *zheader;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].page)) {
		pr_debug("Read before write: sector=%lu, size=%u",
			 (ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		handle_uncompressed_page(zram, bvec, index, offset);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page, KM_USER0);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	ret = DECOMPRESS(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
		       bvec->bv_len);
		kfree(uncmem);
	}

	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);
	return 0;
}
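
zram_bvec_read() depends on two small helpers not shown above. Roughly, as in the staging driver: is_partial_io() flags requests smaller than a page, and the bio_vec flavour of handle_zero_page() zero-fills only the requested slice:

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}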
Example #5
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_inc_stat(zram, ZRAM_STAT_NUM_READS);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t zlen;
		u32 zoffset;
		struct page *bio_page, *zpage;
		unsigned char *bio_mem, *zmem;

		bio_page = bvec->bv_page;

		if (zram_is_zero_page(zram, index)) {
			handle_zero_page(bio_page);
			continue;
		}

		zram_find_obj(zram, index, &zpage, &zoffset);

		/* Requested page is not present in compressed area */
		if (unlikely(!zpage)) {
			pr_debug("Read before write on swap device: "
				"sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(!zoffset)) {
			handle_uncompressed_page(zram, bio_page, index);
			continue;
		}

		bio_mem = kmap_atomic(bio_page, KM_USER0);
		zlen = PAGE_SIZE;

		zmem = kmap_atomic(zpage, KM_USER1) + zoffset;

		ret = lzo1x_decompress_safe(zmem, xv_get_object_size(zmem),
					bio_mem, &zlen);

		kunmap_atomic(bio_mem, KM_USER0);
		kunmap_atomic(zmem, KM_USER1);

		/* This should NEVER happen - return bio error if it does! */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			goto out;
		}

		flush_dcache_page(bio_page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
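
The fork shown in Examples #2 and #5 replaces the stats structure with indexed counters (ZRAM_STAT_*). The accessors are not shown; a plausible sketch, assuming a per-device array of 64-bit atomic counters (the historical patch used per-CPU counters, so treat both the names and the backing store as hypothetical):

/* Hypothetical backing store: atomic64_t count[] indexed by stat id. */
static void zram_add_stat(struct zram *zram, enum zram_stats_index idx,
			  s64 val)
{
	atomic64_add(val, &zram->stats.count[idx]);
}

static inline void zram_inc_stat(struct zram *zram, enum zram_stats_index idx)
{
	zram_add_stat(zram, idx, 1);
}

static inline void zram_dec_stat(struct zram *zram, enum zram_stats_index idx)
{
	zram_add_stat(zram, idx, -1);
}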
Example #6
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			index++;
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_io_error(bio);
	return 0;
}
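
Both zram_read() variants hand incompressible pages to handle_uncompressed_page(), which bypasses LZO and copies the stored PAGE_SIZE object straight into the bio page. A sketch matching the full-page signature used above:

static void handle_uncompressed_page(struct zram *zram,
				     struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}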