/*
 * Release the backing storage for one zram table slot and undo the
 * accounting that was done when the page was stored.
 *
 * Three cases, mirroring zram's storage strategy:
 *  - zero-filled page: nothing was allocated, only the ZRAM_ZERO flag
 *    and the zero-page counter need clearing;
 *  - incompressible page: stored as a whole page, freed with
 *    __free_page() and charged at PAGE_SIZE;
 *  - compressed object: size is read from the xvmalloc object header
 *    (minus our zobj_header) before the object is freed back to the pool.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;
	void *obj;
	u32 clen;

	if (unlikely(!page)) {
		/*
		 * Zero-filled pages carry no allocation; just drop the
		 * flag and counter if the slot was marked zero.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		/* Stored uncompressed: one full page to give back. */
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
	} else {
		/*
		 * Compressed object: map it just long enough to read the
		 * stored size, then return it to the xvmalloc pool.
		 */
		obj = kmap_atomic(page, KM_USER0) + offset;
		clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
		kunmap_atomic(obj, KM_USER0);

		xv_free(zram->mem_pool, page, offset);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_dec(&zram->stats.good_compress);
	}

	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	/* Mark the slot empty so a later free is a no-op. */
	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}
/* Example #2: alternative variant of zram_free_page using generic stat helpers. */
/*
 * Free the object backing a zram table slot and reverse the stat
 * accounting performed at store time.
 *
 * Zero-filled slots have no allocation at all; uncompressed pages are
 * recognised by a zero offset and occupy a whole page; everything else
 * is a compressed object living in the xvmalloc pool.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct page *page;
	u32 offset;
	int zlen;

	/*
	 * No memory is allocated for zero filled pages.
	 * Simply clear corresponding table entry.
	 */
	if (zram_is_zero_page(zram, index)) {
		zram_clear_zero_page(zram, index);
		zram_dec_stat(zram, ZRAM_STAT_PAGES_ZERO);
		return;
	}

	zram_find_obj(zram, index, &page, &offset);
	if (!page)
		return;

	if (offset) {
		/*
		 * Compressed object: map it only long enough to read its
		 * stored size, then hand it back to the pool.
		 */
		void *obj = kmap_atomic(page, KM_USER0) + offset;
		zlen = xv_get_object_size(obj);
		kunmap_atomic(obj, KM_USER0);

		xv_free(zram->mem_pool, page, offset);
	} else {
		/* Uncompressed pages consume a whole page, so offset is zero. */
		zlen = PAGE_SIZE;
		__free_page(page);
		zram_dec_stat(zram, ZRAM_STAT_PAGES_EXPAND);
	}

	zram_add_stat(zram, ZRAM_STAT_COMPR_SIZE, -zlen);
	zram_dec_stat(zram, ZRAM_STAT_PAGES_STORED);

	/* Empty the slot so subsequent lookups see it as free. */
	zram->table[index].addr = 0;
}